Example #1
def drawSamples(drawing, data, width=600, height=400, show_progress=False):
    size = len(data)
    grain = width / size
    path = Path(
        stroke_width=1,
        stroke='black',
        fill='black',
        fill_opacity=0.0,
    )
    path.M(0, height / 2)
    for i, d in enumerate(data):
        x = i * grain
        y = d * height * 0.5 + height * 0.5
        path.L(x, y)
        if show_progress:
            progress(i, size)

    drawing.append(path)
    return drawing
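
The Path class and the progress helper referenced above are not part of the snippet. Purely as a sketch, the function could be driven as below, assuming the drawsvg 2.x package for Path/Drawing and a trivial stand-in for progress; neither choice comes from the original example.

# Sketch only: drawsvg and the progress() stub are assumptions, not
# necessarily the libraries used by the original snippet.
import sys
import numpy as np
import drawsvg as draw

Path = draw.Path  # the Path class that drawSamples expects to find

def progress(current, total):
    # Minimal textual progress indicator standing in for the real helper.
    sys.stdout.write('\r%d/%d' % (current + 1, total))
    sys.stdout.flush()

data = np.sin(np.linspace(0, 4 * np.pi, 1000))  # samples in [-1, 1]
drawing = draw.Drawing(600, 400)
drawSamples(drawing, data, width=600, height=400, show_progress=True)
drawing.save_svg('waveform.svg')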
Example #2
    N = blocksize
    T = 1.0 / blocksize * 1.25
    for n, b in enumerate(audio_chunks(data, blocksize)):
        img = np.zeros((args.height, args.width, 3), np.uint8)
        if args.multichannel and meta.channels > 1:
            reflect = [(1, 1), (-1, 1), (1, -1), (-1, -1)]
            for i in range(meta.channels - 1):
                img = render_frame(img,
                                   spectrum(b.T[i], N),
                                   threshold=args.threshold,
                                   thickness=args.thickness,
                                   spread=args.spread or rms(b.T[i]) * 4,
                                   width=args.width,
                                   height=args.height)
        else:
            if meta.channels > 1:
                b = b.T[0]
            img = render_frame(img,
                               spectrum(b, N),
                               threshold=args.threshold,
                               thickness=args.thickness,
                               spread=args.spread or rms(b) * 4,
                               width=args.width,
                               height=args.height)
        cv2.imwrite(os.path.join(args.outdir, '{0:05d}.png'.format(n + 1)),
                    img)
        progress(n, blocks)

    sys.stdout.write("\n")
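
audio_chunks, spectrum, rms and render_frame are helpers from the surrounding project and are not shown here. Minimal numpy stand-ins for the first three might look like the following sketch; the real implementations may well differ.

# Sketch only: illustrative stand-ins, not the project's actual helpers.
import numpy as np

def audio_chunks(data, blocksize):
    # Yield consecutive blocks of blocksize samples (the last block may be shorter).
    for start in range(0, len(data), blocksize):
        yield data[start:start + blocksize]

def rms(samples):
    # Root-mean-square level of one block.
    samples = np.asarray(samples, dtype=np.float64)
    return float(np.sqrt(np.mean(samples ** 2)))

def spectrum(samples, n):
    # Magnitude spectrum of one block via a real FFT of length n.
    return np.abs(np.fft.rfft(samples, n=n))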
Example #3
console("info", "connecting to MQTT broker...")
sleep(2)
client.loop_start()

# Open camera
resolution = (1024, 768)
vs = VideoStream(
    usePiCamera=True,
    resolution=resolution,
    framerate=2,
).start()
console("info", "Opening Camera...")
startProgress("Opening Camera")
for i in range(10):
    sleep(1)
    progress(10 * i)
endProgress()
sleep(1)

# Capture frames
console("info", "Capturing frames")
cnt = 0
while True:
    sys.stdout.write("\r[+] frame %d" % cnt)
    sys.stdout.flush()
    cnt += 1
    frame = imutils.resize(vs.read(), width=newwidth, height=newheight)
    # print(frame)
    retval, buffer = cv2.imencode('.jpg', frame)
    jpg_as_text = base64.b64encode(buffer)
    # print(jpg_as_text)
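
The snippet is cut off before jpg_as_text is used. Purely as an illustration (not part of the original code), a base64-encoded frame like this is typically published through the paho-mqtt client started above; the topic name below is invented.

    # Hypothetical continuation of the loop body; "camera/frame" is a made-up topic.
    client.publish("camera/frame", jpg_as_text)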
Example #4
def progress(msg, br=False):
    lib.progress("[AUGMENT] %s"%msg,br)
### mk_analysis_expStratifiedRecall

from __future__ import division
import sys
_, data_dirpath, nDiv, method_name, topK = sys.argv
nDiv, topK = int(nDiv), int(topK)
from lib import progress, gen_path, gen_data_filename, gen_mk_out_filename

### items 
fin_filename = gen_path(data_dirpath, "items.dat")
items = []
with open(fin_filename) as fin:
    progress("reading %s..."%fin_filename)
    for line in fin:
        i = int(line)
        items.append(i)

recall_at = {}
for d in range(1, nDiv):
    ### read train data
    fin_filename = gen_path(method_name, gen_data_filename(d, nDiv, "train"))
    train_ratings = []
    with open(fin_filename) as fin:
        progress("reading %s..."%fin_filename)
        for line in fin:
            u, i, r, t = line.split()
            u, i, r, t = int(u), int(i), float(r), int(t)
            train_ratings.append((u,i,r,t))

    ### read test data
    fin_filename = gen_path(method_name, gen_data_filename(d, nDiv, "test"))
Example #6
def progress(msg, br=False):
    lib.progress("[TRAIN] %s"%msg,br)
Example #7
                    '--size',
                    dest='size',
                    type=int,
                    action='store',
                    default=50,
                    help='flock size')
    ap.add_argument(
        '-S',
        '--speed',
        dest='speed',
        type=int,
        action='store',
        help='glider speed; if not specified, speed is randomized in 1..5')
    args = ap.parse_args()

    flock = GliderFlock(
        width=args.width,
        height=args.height,
        length=args.length,
        size=args.size,
        speed=args.speed,
    )

    for i in range(args.iterations):
        flock.step()
        cv2.imwrite(os.path.join(args.outdir, '{:05d}.png'.format(i)),
                    flock.array * 255)
        progress(i, args.iterations)

    stdout.write('\n')
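
The loop above writes one numbered PNG per iteration. As an optional follow-up step that is not part of the original code, such a frame sequence can be stitched into a video with OpenCV's VideoWriter; the output path, codec and frame rate below are assumptions.

# Sketch only: assemble the numbered frames into a video file.
import os
import cv2

def frames_to_video(outdir, outfile='flock.mp4', fps=25):
    frames = sorted(f for f in os.listdir(outdir) if f.endswith('.png'))
    first = cv2.imread(os.path.join(outdir, frames[0]))
    height, width = first.shape[:2]
    writer = cv2.VideoWriter(outfile,
                             cv2.VideoWriter_fourcc(*'mp4v'),
                             fps, (width, height))
    for name in frames:
        writer.write(cv2.imread(os.path.join(outdir, name)))
    writer.release()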
Example #8
                        dest='rate',
                        type=float,
                        action='store',
                        help='rate')
    args = parser.parse_args()

    image = np.zeros((args.height, args.width, 3), dtype=np.uint8)
    meta, audio = ap.loadwav(args.audiofile)

    blocksize = meta.rate // args.fps
    blocks = meta.samples // blocksize
    scroll = blocksize // args.height
    last_img = None
    for i, block in enumerate(ap.audio_chunks(audio, blocksize)):
        img = last_img if last_img is not None else image
        img = render_frame(
            img,
            block.T[0] if meta.channels > 1 else block,
            blocksize,
            args.width,
            args.height,
            raw=args.raw,
        )
        cv2.imwrite(os.path.join(args.outdir, '{0:05d}.png'.format(i + 1)),
                    img)
        last_img = np.zeros(img.shape, img.dtype)
        # scroll left
        last_img[:, 0:args.width - scroll] = img[:, scroll:]
        # progress
        lib.progress(i, blocks)
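
Here ap.loadwav (like ap.audio_chunks, and the helpers in Example #2) comes from the surrounding project and is not shown. A minimal stand-in built on scipy.io.wavfile could look like the sketch below; the rate/samples/channels fields mirror how meta is used above, but the real helper may differ.

# Sketch only: illustrative replacement for ap.loadwav, not the real one.
from collections import namedtuple
import numpy as np
from scipy.io import wavfile

WavMeta = namedtuple('WavMeta', ['rate', 'samples', 'channels'])

def loadwav(path):
    rate, data = wavfile.read(path)
    channels = data.shape[1] if data.ndim > 1 else 1
    meta = WavMeta(rate=rate, samples=data.shape[0], channels=channels)
    # Normalize integer PCM to floats in [-1, 1] for downstream processing.
    if np.issubdtype(data.dtype, np.integer):
        data = data.astype(np.float64) / np.iinfo(data.dtype).max
    return meta, data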