Code Example #1
def main():
    camera_dims = (180, 240)  # Dimensions of a DAVIS240
    pixel_location = (6, 8)  # location of the pixel whose firing rate is to be studied
    filename = 'events'
    directory = 'slider_depth/'
    events = utils.read_data('data/' + directory + filename + '.txt')

    single_pixel_events = eventpy.dict_single_pixel_events(
        events, pixel_location)
    single_pixel_intensity = utils.read_intensity_at_location(
        pixel_location,
        'data/slider_depth/',
        'data/slider_depth/images.txt',
        log='yes')
    delta_mod_single_pixel_events = eventpy.dict_delta_mod_single_pixel_events(
        events, pixel_location, 2)

    utils.plot_dictionary(
        data=single_pixel_events,
        title='Events of pixel ({},{}) for the file {}'.format(
            pixel_location[0], pixel_location[1], filename),
        xlimits=(events[0][0], events[-1][0]),
        xlabel='Time in s',
        ylabel='Events',
        type='stem')

    utils.plot_dictionary(
        data=delta_mod_single_pixel_events,
        title='Delta Modulated Log Intensity of pixel ({},{}) for {}'.format(
            pixel_location[0], pixel_location[1], filename),
        xlimits=(events[0][0], events[-1][0]),
        xlabel='Time in s',
        ylabel='Log Intensity',
        type='step')

    utils.plot_dictionary(
        data=single_pixel_intensity,
        title='Intensity of pixel ({},{}) for the file {}'.format(
            pixel_location[0], pixel_location[1], filename),
        xlimits=(events[0][0], events[-1][0]),
        xlabel='Time in s',
        ylabel='Intensity')

    utils.make_video('data/slider_depth/images/',
                     video_name='videos/slider_depth.avi')

    utils.plot_multiple_dictionaries(single_pixel_intensity,
                                     delta_mod_single_pixel_events,
                                     single_pixel_events)

    utils.compare_plots(delta_mod_single_pixel_events, single_pixel_events,
                        'Log Intensity', 'Event Data')
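Example #1 plots the raw events of a single DAVIS pixel alongside its delta-modulated log intensity. The eventpy helpers are project-specific, but the underlying idea is the standard DVS pixel model: an event fires whenever the log intensity moves one contrast threshold away from the level recorded at the last event. A minimal sketch of that idea (the function name and threshold value are illustrative, not part of the project):

def delta_modulate(log_intensity, threshold=0.2):
    # Emit (index, polarity) events whenever the signal moves one full
    # threshold step away from the level at the last emitted event.
    events = []
    level = log_intensity[0]
    for i, value in enumerate(log_intensity[1:], start=1):
        while value - level >= threshold:
            level += threshold
            events.append((i, +1))   # ON event
        while level - value >= threshold:
            level -= threshold
            events.append((i, -1))   # OFF event
    return events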
Code Example #2
File: bot.py  Project: aanand/howaboutnow
    def reply_to_tweet(self, tweet, prefix):
        query = extract_query(tweet.text)
        if query is None:
            self.log("Couldn't find a query in {}".format(self._tweet_url(tweet)))
            return

        try:
            filename = make_video(query)
        except NotEnoughImages as e:
            self.log(str(e))
            return

        if self._is_silent():
            self.log("Silent mode is on. Would've responded to {} with {}".format(
                self._tweet_url(tweet), filename))
            return

        media_id = upload_media(self.api, filename)

        self.post_tweet(
            "{} {}".format(prefix, query),
            reply_to=tweet,
            media_ids=[media_id],
        )
        self.update_reply_threshold(tweet, prefix)
Code Example #3
def eval_rollout(model, prep, initial_frame, n_object, device):
    current_frame = initial_frame.to(device)
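    # Every pair of objects may interact, so connect them with a complete graph.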
    base_graph = nx.complete_graph(n_object)
    graph = dgl.from_networkx(base_graph).to(device)
    pos_buffer = []
    model.eval()
    for step in range(100):
        node_feats, edge_feats = prep(graph, current_frame)
        dummy_relation = torch.zeros(
            edge_feats.shape[0], 1).float().to(device)
        dummy_global = torch.zeros(
            node_feats.shape[0], 1).float().to(device)
        v_pred, _ = model(graph, node_feats[:, 3:5].float(),
                          edge_feats.float(), dummy_global, dummy_relation)
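        # Integrate the predicted velocity into position (velocity scaled by 0.001 per step).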
        current_frame[:, [1, 2]] += v_pred * 0.001
        current_frame[:, 3:5] = v_pred
        pos_buffer.append(current_frame[:, [1, 2]].cpu().numpy())
    pos_buffer = np.vstack(pos_buffer).reshape(100, n_object, -1)
    make_video(pos_buffer, 'video_model.mp4')
Code Example #4
def main(args):
    dataset = Dataset(args.data_path, args.offset_x, args.offset_y,
                      args.batch_size, args.batch_per_video)
    optimizer = keras.optimizers.Adam(lr=1e-4)
    model = ConvLSTM(optimizer, args.init_channel, args.block_num)

    if args.train == 'train':
        dataloader = dataset.train_loader()
        train(dataloader, model, args.epochs, args.steps_per_epoch,
              args.save_path)
        utils.save_model(model, args.save_path)
        x, y = next(dataloader)
        pred = model.predict(x)
        utils.make_image(pred, y)

    elif args.train == 'test':
        video_idx = int(input('Enter the index of the video to predict: '))
        x, y = dataset.test_loader(video_idx)
        model = utils.load_model(model, args.save_path)
        pred = test(model, x, y, args.batch_size)
        abnormal, score = anomaly_score(pred, y)
        plt.plot(score)
        plt.savefig('anomaly score.png')
        utils.make_video(pred, abnormal)
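anomaly_score is not shown in this snippet. For ConvLSTM-style video prediction, a common choice is frame-wise reconstruction error turned into a normalised score; the sketch below is one plausible definition under that assumption, and the 0.5 cut-off is purely illustrative:

import numpy as np

def anomaly_score(pred, y, cutoff=0.5):
    # Mean squared reconstruction error per frame.
    err = ((pred - y) ** 2).reshape(len(pred), -1).mean(axis=1)
    # Min-max normalise to [0, 1] so scores are comparable across videos.
    score = (err - err.min()) / (err.max() - err.min() + 1e-8)
    abnormal = score > cutoff
    return abnormal, score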
Code Example #5
          3, 4]], train_data.stat_max[[3, 4]], train_data.stat_min[[3, 4]])

    prepare_layer = PrepareLayer(node_feats, stat).to(device)
    interaction_net = InteractionNet(node_feats, stat).to(device)
    print(interaction_net)
    optimizer = torch.optim.Adam(interaction_net.parameters(), lr=args.lr)
    state_dict = interaction_net.state_dict()

    loss_fn = torch.nn.MSELoss()
    reg_fn = torch.nn.MSELoss(reduction='sum')
    try:
        for e in range(args.epochs):
            last_t = time.time()
            loss = train(optimizer, loss_fn, reg_fn, interaction_net,
                         prepare_layer, train_dataloader, args.lambda_reg, device)
            print("Epoch time: ", time.time()-last_t)
            if e % 1 == 0:
                valid_loss = eval(loss_fn, interaction_net,
                                  prepare_layer, valid_dataloader, device)
                test_full_loss = eval(
                    loss_fn, interaction_net, prepare_layer, test_full_dataloader, device)
                print("Epoch: {}.Loss: Valid: {} Full: {}".format(
                    e, valid_loss, test_full_loss))
    except:
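        # Catch everything (including Ctrl-C) so an interrupted run is logged
        # instead of crashing before the rollout video is rendered below.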
        traceback.print_exc()
    finally:
        if args.visualize:
            eval_rollout(interaction_net, prepare_layer,
                         test_data.first_frame, test_data.n_particles, device)
            make_video(test_data.test_traj[:100, :, [1, 2]], 'video_truth.mp4')
Code Example #6
File: make_movie.py  Project: uluturki/rl-ecosystem
import os
import argparse

from utils import make_video

# Generate a movie from images.

argparser = argparse.ArgumentParser()

argparser.add_argument('--img_dir', type=str, help='image dir', required=True)

args = argparser.parse_args()

img_dir = args.img_dir

st = 0
ed = len(os.listdir(img_dir))

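# Frames are assumed to be named 1.png, 2.png, ... inside img_dir.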
images = [
    os.path.join(img_dir, '{:d}.png'.format(i + 1)) for i in range(st, ed)
]

make_video(images, os.path.join(img_dir, 'video.avi'))
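The make_video used here comes from the project's own utils module, so its implementation isn't shown. A minimal OpenCV version with the same call shape (a list of image paths in, a video path out) might look like the following; the fps value and XVID codec are assumptions:

import cv2

def make_video(images, outvid, fps=30):
    # Size the writer from the first frame; every frame must match it.
    first = cv2.imread(images[0])
    height, width = first.shape[:2]
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    writer = cv2.VideoWriter(outvid, fourcc, fps, (width, height))
    for path in images:
        writer.write(cv2.imread(path))
    writer.release()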
Code Example #7
File: eval.py  Project: deval-maker/becoming_cleverer
                    id_1 = gt_collisions[j]['object'][1]
                    for k in range(len(gt_ids)):
                        if id_0 == gt_ids[k]['id']:
                            id_x = get_identifier(gt_ids[k])
                        if id_1 == gt_ids[k]['id']:
                            id_y = get_identifier(gt_ids[k])

                    # id_0 = get_identifier(gt_ids[gt_collisions[j]['object'][0]])
                    # id_1 = get_identifier(gt_ids[gt_collisions[j]['object'][1]])
                    frame_rels.append([id_x, id_y])

        frames_gt.append([frame_objs, frame_rels, None])

    if args.video:
        path = os.path.join(args.evalf, '%d_gt' % test_list[test_idx])
        make_video(path, frames_gt, H, W, bbox_size, args.back_ground,
                   args.store_img)

    if args.use_attr:
        des_pred['objects'] = []
        for i in range(len(ids_filter)):
            obj = dict()
            obj['color'] = ids_filter[i][0]
            obj['material'] = ids_filter[i][1]
            obj['shape'] = ids_filter[i][2]
            obj['id'] = i
            des_pred['objects'].append(obj)

    ##### prediction from the learned model

    des_pred['predictions'] = []
Code Example #8
File: video_demo.py  Project: aptlin/posenet-tvm
def main():
    model = posenet.load_model(args.model)
    model = model.to(DEVICE).eval()

    if args.use_tvm:
        import tvm
        from tvm.contrib import graph_runtime

        with open(args.tvm_graph) as f:
            tvm_graph = f.read()
        tvm_lib = tvm.runtime.load_module(args.tvm_lib)
        with open(args.tvm_params, "rb") as f:
            tvm_params = bytearray(f.read())
        ctx = tvm.cpu()
        module = graph_runtime.create(tvm_graph, tvm_lib, ctx)
        module.load_params(tvm_params)

    if not args.webcam:
        if args.output_dir:
            if not os.path.exists(args.output_dir):
                os.makedirs(args.output_dir)

        filenames = [
            f.path
            for f in os.scandir(args.video_dir)
            if f.is_file() and f.path.endswith(".mp4")
        ]

        preprocessing_time = []
        inference_time = []
        processing_time = []

        for filename in tqdm(filenames, desc="Processed", unit="files"):
            cap = cv2.VideoCapture(filename)
            if args.use_tvm:
                out = process_capture(model, cap, module=module)
            else:
                out = process_capture(model, cap)

            (
                processed_images,
                video_preprocessing_time,
                video_inference_time,
                video_processing_time,
            ) = out
            preprocessing_time += video_preprocessing_time
            inference_time += video_inference_time
            processing_time += video_processing_time
            if args.output_dir:
                make_video(
                    os.path.join(
                        args.output_dir, os.path.relpath(filename, args.video_dir)
                    ),
                    processed_images,
                    format_code=args.output_format,
                )

        avg_preprocessing_time = np.mean(preprocessing_time)
        avg_postprocessing_time = np.mean(processing_time)
        avg_inference_time = np.mean(inference_time)
        print("=" * 80)
        print(
            "Decoder: {}, TVM Runtime: {}, Resize to {}x{} HxW: {}".format(
                args.decoder,
                "enabled" if args.use_tvm else "disabled",
                args.processing_height,
                args.processing_width,
                "enabled" if args.resize else "disabled",
            )
        )
        print("-" * 80)

        print("Average pre-processing FPS: {:.2f}".format(1 / avg_preprocessing_time))
        print("Average inference FPS: {:.2f}".format(1 / avg_inference_time))
        print("Average post-processing FPS: {:.2f}".format(1 / avg_postprocessing_time))
        print(
            "Average FPS: {:.2f}".format(
                1
                / (
                    avg_postprocessing_time
                    + avg_inference_time
                    + avg_preprocessing_time
                )
            )
        )
    else:
        cap = cv2.VideoCapture(args.cam_id)
        if args.use_tvm:
            process_capture(model, cap, module=module)
        else:
            process_capture(model, cap)
Code Example #9
        bg = cv2.add(cLPF * bg, (1 - cLPF) * bgnew)
        
        # Create the frame: mid-grey background, ON events white, OFF events black.
        gray = 125 * np.ones([height, width], dtype=np.uint8)
        t = time()
        gray[off > 0] = 0
        gray[on > 0] = 255

        # np.where returns a tuple of index arrays (rows, cols), i.e. (y, x).
        pos_coords = np.where(on)
        if pos_coords[0].size:
            for y, x in zip(pos_coords[0], pos_coords[1]):
                event = '{} {} {} 1\n'.format(t, x, y)  # timestamp x y polarity
                utils.append_to_event_file(event_file, event)

        
        neg_coords = np.where(off)
        if neg_coords[0].size:
            for y, x in zip(neg_coords[0], neg_coords[1]):
                event = '{} {} {} 0\n'.format(t, x, y)
                utils.append_to_event_file(event_file, event)
       
        cv2.imwrite('event_output/event_frames/' + str(frame).zfill(3) + '.png',
                    cv2.flip(gray, 1).astype('uint8'))

    event_file.close()  # close event file
    utils.make_video('event_output/event_frames/')

    print('DONE!')
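The events written above use a plain-text format of one 'timestamp x y polarity' line per event, which appears to be the same format that utils.read_data consumes in Code Example #1. A minimal reader for that format (a hypothetical helper, not taken from the project):

def read_events(path):
    # Parse 'timestamp x y polarity' lines into (float, int, int, int) tuples.
    events = []
    with open(path) as f:
        for line in f:
            t, x, y, p = line.split()
            events.append((float(t), int(x), int(y), int(p)))
    return events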
Code Example #10
                                 opts.dataset, video)
        process_dir = os.path.join(opts.data_dir, opts.phase, "processed",
                                   opts.task, opts.dataset, video)
        output_dir = os.path.join(opts.data_dir, opts.phase, "ECCV18",
                                  opts.task, opts.dataset, video)
        if not os.path.isdir(output_dir):
            os.makedirs(output_dir)

        frame_list = glob.glob(os.path.join(input_dir, "*.jpg"))
        output_list = glob.glob(os.path.join(output_dir, "*.jpg"))

        if len(frame_list) == len(output_list) and not opts.redo:
            print("Output frames exist, skip...")
            #if len(glob.glob(os.path.join(input_dir, "*.avi")))==0:
            utils.make_video(output_dir,
                             '%05d.jpg',
                             os.path.join(output_dir, "%s.avi" % (video)),
                             fps=24)
            continue

        ## frame 0
        frame_p1 = utils.read_img(os.path.join(process_dir, "00000.jpg"))
        output_filename = os.path.join(output_dir, "00000.jpg")
        utils.save_img(cv2.resize(frame_p1, (4000, 4000),
                                  interpolation=cv2.INTER_CUBIC),
                       output_filename)

        lstm_state = None

        for t in range(1, len(frame_list)):

            ### load frames
            frame_i1 = utils.read_img(