Code example #1
0
    # Load the audio track; loadwav presumably returns (metadata, sample
    # array) — defined elsewhere in the project.
    meta, data = loadwav(args.soundfile)

    print("Samplerate: %d" % meta.rate)
    print("Channels: %d" % meta.channels)
    print("Length: %d samples, %d seconds" % (meta.samples, meta.seconds))

    # Audio samples consumed per video frame at the requested frame rate,
    # and the total number of whole frames that fit in the recording.
    blocksize = meta.rate // args.fps
    blocks = meta.samples // blocksize

    print("%d Frames at %d samples" % (blocks, blocksize))
    term_width = 100

    N = blocksize
    # NOTE(review): T is computed but never read in this excerpt — it may be
    # used past the visible lines; confirm before removing.
    T = 1.0 / blocksize * 1.25
    for n, b in enumerate(audio_chunks(data, blocksize)):
        # Fresh black canvas (height x width, 3-channel 8-bit) per frame.
        img = np.zeros((args.height, args.width, 3), np.uint8)
        if args.multichannel and meta.channels > 1:
            # NOTE(review): reflect appears unused within this excerpt — confirm.
            reflect = [(1, 1), (-1, 1), (1, -1), (-1, -1)]
            # NOTE(review): range(meta.channels - 1) skips the last channel —
            # possible off-by-one; confirm intent against the full file.
            for i in range(meta.channels - 1):
                # Overlay each channel's spectrum onto the same canvas.
                img = render_frame(img,
                                   spectrum(b.T[i], N),
                                   threshold=args.threshold,
                                   thickness=args.thickness,
                                   spread=args.spread or rms(b.T[i]) * 4,
                                   width=args.width,
                                   height=args.height)
        else:
            # Mono path: for multi-channel input render only channel 0.
            if meta.channels > 1:
                b = b.T[0]
            img = render_frame(img,
Code example #2
0
    parser.add_argument('-r',
                        '--rate',
                        dest='rate',
                        type=float,
                        action='store',
                        help='rate')
    args = parser.parse_args()

    # Blank base canvas (height x width, 3-channel 8-bit) used for frame 0.
    image = np.zeros((args.height, args.width, 3), dtype=np.uint8)
    meta, audio = ap.loadwav(args.audiofile)

    # Audio samples per video frame, and total whole frames.
    blocksize = meta.rate // args.fps
    blocks = meta.samples // blocksize
    # Horizontal scroll distance per frame, in pixels.
    # NOTE(review): derived from blocksize // height — confirm this is the
    # intended relation (width is not involved).
    scroll = blocksize // args.height
    last_img = None
    for i, block in enumerate(ap.audio_chunks(audio, blocksize)):
        # Draw onto the previous (already-scrolled) frame so content persists.
        img = last_img if last_img is not None else image
        img = render_frame(
            img,
            # Multi-channel audio: render only the first channel.
            block.T[0] if meta.channels > 1 else block,
            blocksize,
            args.width,
            args.height,
            raw=args.raw,
        )
        # Output frames are numbered from 00001.
        cv2.imwrite(os.path.join(args.outdir, '{0:05d}.png'.format(i + 1)),
                    img)
        last_img = np.zeros(img.shape, img.dtype)
        # scroll left
        last_img[:, 0:args.width - scroll] = img[:, scroll:]
        # progress
Code example #3
0
File: convolve.py — Project: sansculotte/vizzy
    # Load the audio track; loadwav returns (metadata, sample array).
    meta, data = loadwav(args.soundfile)

    # Audio samples consumed per rendered frame, and total whole frames.
    blocksize: int = meta.rate // args.fps
    blocks: int    = meta.samples // blocksize

    # Source bitmaps; cycled below so the audio length dictates frame count.
    files = glob(args.infile)
    if args.shape:
        try:
            shape = tuple(map(int, args.shape.split(',')))
            if len(shape) != 2:
                # Explicit raise instead of `assert`: still enforced when
                # running under `python -O`.
                raise ValueError('shape must have exactly two components')
        except (AssertionError, ValueError):
            print('Can not parse shape parameter, must be: x,y')
            # BUGFIX: fall back to "no reshape" instead of leaving `shape`
            # unbound, which raised NameError at `if shape:` below.
            shape = None
    else:
        shape = None

    for n, data_pair in enumerate(zip(audio_chunks(data, blocksize), cycle(files))):
        block, imgfile = data_pair
        # Zero-padded frame number used in the output filename.
        padded = "{0:05d}".format(n)
        bitmap = cv2.imread(imgfile, cv2.IMREAD_GRAYSCALE)
        if args.mode == 'fft':
            # Convolve against the block's spectrum instead of the waveform.
            block = spectrum(block, meta.rate)
        if shape:
            block = np.reshape(block, shape)

        # 2-D FFT convolution of the bitmap with the (amplified) audio block.
        image = fftconvolve(bitmap, block * args.amplify, mode='same')
        cv2.imwrite(path.join(args.outdir, f'{padded}.png'), image)

        percent_finished = int(n / blocks * 100)
        print(f'{percent_finished:>3}%', end='\r', flush=True)
Code example #4
0
    bitmap = cv2.imread(imagefiles[0])
    if bitmap is None:
        print("Problem loading the first Imagefile")
        sys.exit(-1)
    print("image loaded")

    # Raw bytes used as glitch material.
    junk = np.fromfile(args.junkfile, dtype='u1')
    # BUGFIX: np.roll returns a shifted copy and does not mutate its input;
    # the original discarded the result, so args.offset had no effect.
    junk = np.roll(junk, args.offset)
    print("junk loaded")

    height, width, colors = bitmap.shape
    blocksize             = meta.rate // args.fps    # audio samples per frame
    blocks                = meta.samples // blocksize

    juncnt = 0
    # Frames are 1-indexed; only channel 0 of the audio is used.
    for n, b in enumerate(audio_chunks(data.T[0], blocksize), 1):
        #if os.path.exists(os.path.join(args.outdir, "frame_%06d.png"%n)):
        #    continue
        if args.length is not None and n-args.start_frame >= args.length:
            continue
        if n < args.start_frame or n > blocks:
            continue
        elif len(imagefiles) > 1:
            # BUGFIX: `%` binds tighter than `-`, so `n-1 % len(imagefiles)`
            # evaluated as `n - (1 % len)` == `n - 1`, overrunning the list
            # once n exceeds its length instead of cycling through it.
            bitmap = cv2.imread(imagefiles[(n - 1) % len(imagefiles)])

        frame = bitmap.copy()
        # Resample the audio block to one amplitude value per image row.
        shift = np.interp(np.arange(height), np.arange(blocksize), b)
        for line in range(height):
            v_line = shift[line] * shift[line]   # squared amplitude for this row
            if v_line > args.threshold:
                # NOTE(review): self-assignment is a no-op as shown; the real
                # row transformation presumably follows past this excerpt —
                # confirm against the full file.
                frame[line] = frame[line]