def get_queries():
    q = './queries'
    # Collect the names of all sub-directories under ./queries, sorted.
    folders = sorted(x for x in os.listdir(q)
                     if os.path.isdir(os.path.join(q, x)))
    for folder in folders:
        generate_video(dir=q, name=folder, output_dir="./static/")
    return jsonify({'folders': folders})
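Every example on this page calls some flavor of generate_video, but the signatures differ from project to project. As a point of reference, here is a minimal, hypothetical implementation matching the keyword signature used in the example above (the dir/name/output_dir parameters come from that call; the OpenCV body and the fps default are assumptions, not the project's actual code). It stitches the PNG frames found in a sub-folder into an MP4:

import os
import cv2  # opencv-python

def generate_video(dir, name, output_dir, fps=30):
    # Hypothetical sketch: stitch the PNG frames in dir/name into output_dir/name.mp4.
    frame_dir = os.path.join(dir, name)
    frames = sorted(f for f in os.listdir(frame_dir) if f.endswith(".png"))
    if not frames:
        return
    height, width = cv2.imread(os.path.join(frame_dir, frames[0])).shape[:2]
    writer = cv2.VideoWriter(os.path.join(output_dir, f"{name}.mp4"),
                             cv2.VideoWriter_fourcc(*"mp4v"), fps, (width, height))
    for frame in frames:
        writer.write(cv2.imread(os.path.join(frame_dir, frame)))
    writer.release()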
Example 2
def update_video(start_date, end_date, n_clicks_timestamp):
    logger.info(f"n_clicks_timestamp={n_clicks_timestamp}")
    latest_file = max(Path("history_data/").glob("dxy_minutes*.json"),
                      key=lambda p: p.stat().st_ctime)
    with open(latest_file, "r", encoding="utf8") as f:
        history = json.load(f)
    # Only react to a genuine button press: the timestamp must be set and the
    # click must have happened within the last second.
    if (start_date and end_date and n_clicks_timestamp
            and n_clicks_timestamp != -1
            and (datetime.now() - utils.timestamp2datetime(
                n_clicks_timestamp / 1000)).seconds < 1):
        logger.info("[start] updating video")
        fps = 30
        dpi = 300
        figdir = "assets/figures"
        Path(figdir).mkdir(exist_ok=True, parents=True)
        utils.rmfigures(figdir)
        logger.info("[start] generating figures")
        utils.generate_figures(history, provinces_geomap, provinces_list,
                               start_date, end_date, dpi, figdir)
        logger.info("[done] generating figures")
        videoname = (f"assets/tncg-{start_date.replace('-', '')}-"
                     f"{end_date.replace('-', '')}-"
                     f"{datetime.now().strftime('%Y%m%d%H%M%S')}.mp4")
        logger.info("[start] generating video")
        utils.generate_video(f"{figdir}/%d.png", videoname, fps)
        logger.info("[done] generating video")
        logger.info("[done] updating video")
        src = f"/{videoname}"
        logger.debug(f"src={src}")
        return src
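This handler reads like a Dash callback: start_date and end_date would come from a date-range picker, n_clicks_timestamp from a button, and the returned src would feed a video player. A minimal sketch of how it might be wired up (every component id below is an assumption, not from the source):

import dash
from dash.dependencies import Input, Output

app = dash.Dash(__name__)

@app.callback(
    Output("video-player", "src"),                   # hypothetical id
    [Input("date-picker", "start_date"),             # hypothetical id
     Input("date-picker", "end_date"),
     Input("update-button", "n_clicks_timestamp")])  # hypothetical id
def update_video(start_date, end_date, n_clicks_timestamp):
    ...  # body as in the example above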
Example 3
def video_upload():
    target = os.path.join(APP_ROOT, 'files/')
    clean_data(target)
    if not os.path.isdir(target):
        os.mkdir(target)
    for upload in request.files.getlist("file"):
        filename = upload.filename
        print("{} is the file name".format(filename))
        ext = os.path.splitext(filename)[1]
        if ext != ".mp4":
            return render_template(
                "Error.html",
                message="The application supports only mp4 videos; "
                        "this format is not supported")
        destination = os.path.join(target, filename)
        print("Accept incoming file:", filename)
        print("Save it to:", destination)
        upload.save(destination)
        pre_process(target, destination, filename)
        generate_video(target, filename)
    return render_template("complete_video.html", value=filename)
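The handler above expects a multipart POST whose file field is named "file". Assuming it is registered as a Flask route (the /upload path is an assumption), it could be exercised with Flask's test client like this:

# Hypothetical driver: POST an mp4 to the handler via Flask's test client.
with app.test_client() as client:
    with open("sample.mp4", "rb") as f:
        resp = client.post("/upload",
                           data={"file": (f, "sample.mp4")},
                           content_type="multipart/form-data")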
Example 4
def stylize(args):
    transform = transforms.Compose([
        transforms.Resize(256),
        transforms.ToTensor(),
        transforms.Lambda(lambda x: x.mul(255))])
    ds = ImageFolder(args.input_dir, transform=transform)
    dl = DataLoader(ds, batch_size=1)
    print("=> Load from model file %s" % args.model_path)
    net = TransformerNet(args.pad_type)
    net.load_state_dict(torch.load(args.model_path))
    net = net.eval().cuda()
    
    utils.process_dataloader(args, net, dl)
    utils.generate_video(args, dl)
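stylize only touches args.input_dir, args.model_path, and args.pad_type directly; the utils helpers may read further fields. A minimal argparse front end satisfying the attributes visible here (the flag names and the pad-type default are assumptions):

import argparse

parser = argparse.ArgumentParser(description="Stylize a folder of images")
parser.add_argument("--input-dir", dest="input_dir", required=True)
parser.add_argument("--model-path", dest="model_path", required=True)
parser.add_argument("--pad-type", dest="pad_type", default="reflect")  # default is a guess
stylize(parser.parse_args())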
Example 5
def query_preprocess():
    output_dir = "/Users/jiarongqiu/Desktop/CS576/Final/output_videos/"

    data_dir = "/data/dataset/"
    subs = [
        "flowers", "interview", "movie", "musicvideo", "sports", "starcraft",
        "traffic"
    ]
    for sub in subs:
        generate_video(data_dir, sub, output_dir)

    data_dir = "data/query/"
    subs = ["first", "second", "Q3", "Q4", "Q5", "HQ1", "HQ2", "HQ4"]
    for sub in subs:
        generate_video(data_dir, sub, output_dir)
Example 6
def testing(dataloader):
    # load the trained ConvLSTM encoder-decoder
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    path = os.path.join(args.store_dir, 'model.pth.tar')
    model = EncoderDecoderConvLSTM(nf=args.n_hidden_dim, in_chan=1)
    model = nn.DataParallel(model)
    model.load_state_dict(torch.load(path))
    model.to(device)
    # test
    criterion = nn.MSELoss()
    print(f'Started testing on {device}')
    with torch.no_grad():
        for batch in dataloader:
            batch = batch.to(device)
            # the first 10 frames are the input; the rest are the prediction target
            x, y = batch[:, 0:10, :, :, :], batch[:, 10:, :, :, :].squeeze()
            y_hat = model(x, future_seq=10).squeeze()
            testing_loss = criterion(y_hat, y)
            video_frames = create_array(y_hat, y)
            generate_video(video_array=video_frames,
                           video_filename=args.store_dir + '/result.avi')
            break  # only evaluate one batch
    return testing_loss.cpu()
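create_array is not shown. Given that its output feeds a video writer, one plausible reading is that it lays prediction and ground truth side by side as 8-bit frames; a purely hypothetical sketch:

import numpy as np

def create_array(y_hat, y):
    # Hypothetical: concatenate predicted and target frames side by side as uint8,
    # assuming pixel values in [0, 1].
    pred = y_hat.detach().cpu().numpy()
    target = y.detach().cpu().numpy()
    frames = np.concatenate([pred, target], axis=-1)
    return (np.clip(frames, 0.0, 1.0) * 255).astype(np.uint8)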
Example 7
        lines = f.read().split('\n')

    tasks = []
    for line in lines:
        if line == "": continue

        print(f"Parsing: {line}")
        lexer = compiler.Lexer().get()
        pg = compiler.Parser()
        pg.parse()
        parser = pg.get()

        tasks.append(parser.parse(lexer.lex(line)))

    # Default values
    FRAMES = []
    FPS = 15
    LOOP = 1
    output_path = "flipbook.pdf"

    for task in tasks:
        if 'fps' in task: FPS = task['fps']
        if 'loop' in task: LOOP = task['loop']
        if 'frames' in task: FRAMES += task['frames']

    if 'gif' in outfile:
        utils.generate_gif(FPS, LOOP, FRAMES, outfile)
    elif 'avi' in outfile:
        utils.generate_video(FPS, LOOP, FRAMES, outfile)

    print("DONE")
Example 8
            if steps % 10 == 0:
                train_writer.add_summary(summary, steps)

                print("Epoch {}/{}...".format(e + 1, epochs),
                      "Discriminator Loss: {:.4f}...".format(train_loss_d),
                      "Generator Loss: {:.4f}".format(train_loss_g))

            if steps % 100 == 0:
                # Every 100 steps, sample some data from the generator, display and save it.
                # Note that when the generator creates the samples to be displayed, we set
                # training to False, which signals the batch normalization layers to use
                # the population statistics rather than the batch statistics.
                gen_samples = sess.run(generator(input_z,
                                                 real_size[2],
                                                 reuse=True,
                                                 training=False),
                                       feed_dict={input_z: sample_z})

                display_images(gen_samples, denomalize=True)

                # save the samples to disk
                if save_video:
                    plt.savefig(folder + "/file%02d.png" % image_counter)
                    image_counter += 1
                    plt.show()

            steps += 1

if save_video:
    utils.generate_video(dataset_name, folder)
Example 9
            yaw = robot.state[1]
            # Rotation of the body frame in the world frame (yaw about z).
            R_wb = np.array([[np.cos(yaw), -np.sin(yaw), 0],
                             [np.sin(yaw), np.cos(yaw), 0], [0, 0, 1]])
            p_wb = np.array(robot.state[0] + (0.177, ))
            # Pose of the body in the world frame as a 4x4 homogeneous transform.
            T_wb = np.vstack((np.hstack((R_wb, p_wb.reshape(3, 1))),
                              np.array([[0, 0, 0, 1]])))
            world.update_texture(rgb, disp, robot.K_oi,
                                 np.matmul(T_wb, robot.T_bo),
                                 args.floor_threshold)

        if idx_t % args.frame_interval == 0:
            # display every args.frame_interval-th frame
            world.show(data['stamps'][idx_t], robot.trajectory, robot.state[1])
            # world.show_particles(world.ax1, robot.particles)
            # plt.pause(1e-20)    # commented for faster iteration without displaying plot
            plt.savefig(os.path.join(plots_save_path,
                                     'result%05d.png' % idx_t),
                        dpi=150)

    result_save_path = os.path.join(result_dir, str(args.dataset))
    if args.no_noise:
        result_save_path += '_no_noise'
    if args.texture_on:
        result_save_path += '_texture'

    result_save_path = check_and_rename(result_save_path, format='.gif')
    generate_video(plots_save_path, result_save_path)

    # If commented out, the main script will exit without displaying the final result.
    plt.show()
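check_and_rename is not shown either; judging by the call site, it picks an output path that does not clobber an existing file. A hypothetical sketch:

import os

def check_and_rename(path, format='.gif'):
    # Hypothetical: append a numeric suffix until path + format is unused.
    candidate, i = path, 1
    while os.path.exists(candidate + format):
        candidate = f"{path}_{i}"
        i += 1
    return candidate + format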