def test_next_buffer():
    """Cycling forward through a container's buffers, with and without wrap."""
    c = Container()
    b = Buffer('test')
    c.append_buffer(b)
    assert c.next_buffer() is b
    with raises(RuntimeError):
        c.next_buffer(wrap=False)
    assert c.next_buffer() is b
    b2 = Buffer('test2')
    c.append_buffer(b2)
    assert c.next_buffer() is b2
    with raises(RuntimeError):
        c.next_buffer(wrap=False)
    assert c.next_buffer() is b


def test_previous_buffer():
    """Cycling backward through a container's buffers."""
    c = Container()
    b = Buffer('test')
    b2 = Buffer('test2')
    c.append_buffer(b)
    assert c.previous_buffer() is b
    with raises(RuntimeError):
        c.previous_buffer()
    c.append_buffer(b2)
    assert c.previous_buffer() is b2
    # BUG FIX: the original read `assert c.previous_buffer is b` -- comparing
    # the bound method itself (never a Buffer) instead of calling it.
    assert c.previous_buffer() is b
    with raises(RuntimeError):
        c.previous_buffer()
def test_prepend_buffer():
    """Prepending inserts at the front and leaves the current buffer unset."""
    container = Container()
    first = Buffer('test')
    container.prepend_buffer(first)
    assert container.buffers == [first]
    assert container.buffer is None
    second = Buffer('test2')
    container.prepend_buffer(second)
    assert container.buffers == [second, first]
def test_append_buffer():
    """Appending adds at the end and leaves the current buffer unset."""
    container = Container()
    first = Buffer('test')
    container.append_buffer(first)
    assert container.buffers == [first]
    assert container.buffer is None
    second = Buffer('test2')
    container.append_buffer(second)
    assert container.buffers == [first, second]
    assert container.buffer is None
def test_insert_buffer():
    """Buffers can be inserted at arbitrary positions."""
    container = Container()
    first, middle, last = Buffer('First'), Buffer('Middle'), Buffer('Last')
    container.append_buffer(last)
    container.insert_buffer(0, first)
    assert container.buffers == [first, last]
    container.insert_buffer(1, middle)
    assert container.buffers == [first, middle, last]
def test_remove_buffer():
    """Removing a buffer deletes exactly that buffer and no other."""
    container = Container()
    one = Buffer('test')
    two = Buffer('test2')
    container.append_buffer(one)
    container.remove_buffer(one)
    assert container.buffers == []
    for buf in (one, two):
        container.append_buffer(buf)
    container.remove_buffer(two)
    assert container.buffers == [one]
def test_buffer_dict():
    """buffer_dict() maps each buffer's name to the Buffer instance."""
    container = Container()
    expected = {}
    for title in ('first', 'second', 'third', 'fourth', 'fifth', 'sixth'):
        buf = Buffer(title)
        expected[title] = buf
        container.append_buffer(buf)
    assert container.buffer_dict() == expected
def test_remove_item():
    """An appended item can be removed again, leaving the buffer empty."""
    buf = Buffer(name)
    buf.append_item(name)
    assert len(buf.items) == 1
    assert buf.items[0] == name
    buf.remove_item(name)
    assert buf.items == []
def test_length():
    """length() tracks the number of items appended so far."""
    buf = Buffer(name)
    buf.append_item(name)
    assert buf.length() == 1
    # Append ten more items, checking the count after every one.
    for expected in range(2, 12):
        buf.append_item(name)
        assert buf.length() == expected
def test_prepend_item():
    """prepend_item puts each new item at the front of the buffer."""
    buf = Buffer(name)
    buf.prepend_item(name)
    assert buf.items[0] == name
    doubled = name * 2
    buf.prepend_item(doubled)
    assert buf.items[0] == doubled
    assert len(buf.items) == 2
def test_append_item():
    """append_item adds to the end and preserves object identity."""
    buf = Buffer(name)
    assert not buf.items
    buf.append_item(name)
    assert buf.items[-1] is name
    assert len(buf.items) == 1
    doubled = name * 2
    buf.append_item(doubled)
    assert buf.items[-1] is doubled
    assert len(buf.items) == 2
def calc_metrics(rrs):
    """Compute stress and spectral HRV metrics over a series of RR intervals.

    :param rrs: sequence of RR intervals (seconds); must support len().
    :returns: tuple ``(sis, shs, sps, ics, iscas, vbs)`` of numpy arrays --
        stress indices, stress histograms (one column per ``sp_skip`` beats),
        energy spectrums, IC values, ISCA values and vegetative balance.
    """
    buf_duration_scnds = 300  # stress-index window, seconds
    tacho_buf_dur = 300  # tachogram-spectrum window, seconds
    min_rr = 0.3  # shortest RR interval considered valid, seconds
    max_rr = 1.7  # longest RR interval considered valid, seconds
    sp_skip = 10  # keep a histogram/spectrum column every sp_skip beats
    # Worst case: every beat lasts min_rr, so this many samples fill the window.
    buf_size = int(max(buf_duration_scnds, tacho_buf_dur) / min_rr) + 1
    buf = Buffer(buf_size)
    # FIX: Python-2-only `print` statements converted to print() calls, and
    # `/` on array sizes / column indices made explicit floor division (`//`)
    # so the code runs identically on Python 2 and 3.
    print("[+] Calculating all usefull metrics: ")
    si = SI(buf, buf_duration_scnds, min_rr, max_rr)
    sis = np.zeros(len(rrs))  # stress indices
    shs = np.zeros(
        (len(si.get_histogram()), len(rrs) // sp_skip + 1))  # stress histograms
    si_ready = 0
    sp = RRTachogrammEnergySpectrum(buf, tacho_buf_dur, min_rr, max_rr)
    sp_ready = 0
    sps = np.zeros((len(sp.get_spectrum()), len(rrs) // sp_skip + 1))
    ics = np.zeros(len(rrs))
    iscas = np.zeros(len(rrs))
    vbs = np.zeros(len(rrs))
    cnt = -1
    ls = len(rrs)
    # Progress-report step. FIX: clamped to >= 1 so inputs shorter than 10
    # beats no longer raise ZeroDivisionError in `cnt % md` below.
    md = max(ls // 10, 1)
    for r in rrs:
        if cnt % md == 0:
            print("[+] Done {0:.2f}%".format(float(100 * cnt) / ls))
        cnt += 1
        si.update(r)
        sp.update(r)
        buf.add_sample(r)
        # ## Calculating stress indices
        si.calc_si()
        if si.is_ready():
            si_ready += 1
            sis[cnt] = si.get_stress()
            if cnt % sp_skip == 0:
                shs[:, cnt // sp_skip] = si.get_histogram()
        # ## Calculating RR-Tachogram spectrums
        if sp.is_ready():
            sp.calc_energy_spectrum()
            if cnt % sp_skip == 0:
                sps[:, cnt // sp_skip] = sp.get_spectrum()
            ics[cnt] = sp.get_IC()
            iscas[cnt] = sp.get_ISCA()
            vbs[cnt] = sp.get_vegetative_balance()
            sp_ready += 1
    # ## Fulfil heart rate buffer
    # FIX: max(cnt, 1) guards the readiness percentages against division by
    # zero when rrs has one element or fewer. (The unused `hrvs` array the
    # original allocated and never returned has been dropped.)
    print("[+] Calculation finnished: SI ready " + "{0:.2f}%".format(
        float(si_ready * 100) / max(cnt, 1)) + " SP ready: {0:.2f}%".format(
            float(sp_ready * 100) / max(cnt, 1)))
    return sis, shs, sps, ics, iscas, vbs
def test_format_item():
    """format_item can be overridden, and the base implementation is identity."""

    class RaisingBuffer(Buffer):
        def format_item(self, item):
            """Make it possible to catch the override."""
            raise ValueError

    with raises(ValueError):
        RaisingBuffer(name).format_item(name)
    # The stock Buffer returns the item unchanged.
    assert Buffer(name).format_item(name) is name
def __init__(self, stream_id, bit_rate_in_kbps=128.0):
    """Attach this reader to the shared per-stream buffer, creating it on first use.

    The buffer, its byte rate and the per-chunk sleep time are shared
    between all readers of the same stream id via the class-level
    ``AudioStreamReader.stream_buffers`` registry.
    """
    Greenlet.__init__(self)
    self.stream_id = stream_id
    registry = AudioStreamReader.stream_buffers
    if not registry.get(self.stream_id, None):
        shared_buffer = Buffer()
        # kbps -> bytes per second
        rate_bytes = (bit_rate_in_kbps / 8) * 1024
        # how long one chunk "lasts" at that rate
        chunk_pause = (shared_buffer.chunk_byte_size * 1.0) / rate_bytes
        registry[stream_id] = [shared_buffer, rate_bytes, chunk_pause]
    self.buffer, self.byte_rate, self.sleep_time = registry[self.stream_id]
def __init__(self, env, agent, args):
    """Set up the A2C trainer.

    :param env: environment the agent interacts with
    :param agent: agent being trained
    :param args: namespace with num_episodes, buffer_size, batch_size,
        solved (pair of [reward threshold, episode window]), render and
        tensorboard fields
    :raises ValueError: if ``args.solved`` does not have exactly 2 elements
    """
    super(A2CTrainer, self).__init__()
    self.env = env
    self.agent = agent
    self.num_episodes = args.num_episodes
    self.buffer = Buffer(args.buffer_size, args.batch_size)
    # FIX: validate with a real exception instead of `assert`, which is
    # silently stripped when Python runs with -O.
    if len(args.solved) != 2:
        raise ValueError('args.solved has to have length of exactly 2!')
    self.solved_r = args.solved[0]
    self.solved_ep = args.solved[1]
    self.render = args.render
    self.writer = SummaryWriter(args.tensorboard)
def do_SOURCE(self):
    """Handle an icecast SOURCE request: authenticate the client, then
    relay its audio into a shared buffer until it disconnects.

    On success responds 200 and registers an IcyClient with the manager;
    on failed login responds 401 and returns without streaming.
    """
    self.useragent = self.headers.get('User-Agent', None)
    self.mount = self.path  # oh so simple
    self.stream_name = self.headers.get('ice-name', '<Unknown>')
    user, password = self._get_login()
    if (self.login(user=user, password=password)):
        if user == 'source':
            # No need to try; except because the self.login makes sure
            # we can split it.
            # Convention: 'source' logins carry "realuser|realpassword".
            user, password = password.split('|')
        logger.info("source: User '%s' logged in correctly.", user)
        self.send_response(200)
        self.end_headers()
    else:
        if user == 'source':
            # We need to try; except here because we can't be certain
            # it contains a | at all.
            try:
                user, password = password.split('|')
            except ValueError as err:
                logger.info(
                    "source: Failed login, no separator found "
                    "from %s.", str(self.client_address))
            else:
                # NOTE(review): structure reconstructed from a collapsed
                # source -- this `else` is read as the try's else branch
                # (log the real user once the split succeeded); confirm.
                logger.info("source: User '%s' failed to login from %s.",
                            user, str(self.client_address))
        self.send_response(401)
        self.end_headers()
        return
    # Authenticated: create the relay buffer and register the source.
    self.audio_buffer = Buffer(max_size=MAX_BUFFER)
    self.icy_client = IcyClient(self.audio_buffer,
                                self.mount,
                                user=user,
                                useragent=self.useragent,
                                stream_name=self.stream_name)
    self.manager.register_source(self.icy_client)
    try:
        # Pump 4 KiB chunks from the client into the shared buffer.
        # NOTE(review): `data == ''` implies rfile yields str (Python 2);
        # under Python 3 read() returns bytes and this would never match.
        while True:
            data = self.rfile.read(4096)
            if data == '':
                break
            self.audio_buffer.write(data)
    except:
        logger.exception("Timeout occured (most likely)")
    finally:
        # Always unregister, even on error, so the mount frees up.
        logger.info("source: User '%s' logged off.", user)
        self.manager.remove_source(self.icy_client)
def __init__(self, buf, buf_duration_scnds=300, min_rr=0.3, max_rr=1.71): self.time_span = TimeSpanBuffer( buf, buf_duration_scnds) # remembers last 300 sec buf_size = int(buf_duration_scnds / min_rr) + 1 # energy spectrum self.cum_times_buf = Buffer(buf_size) self.time_step = 1.0 / 67 #min_rr self.spectr_intervals_cnt = int( buf_duration_scnds / self.time_step) # assume 1 heart beat per second self.freqs = np.fft.fftfreq(self.spectr_intervals_cnt, self.time_step) idx = np.argsort(self.freqs) self.idx = [i for i in idx if 0.0 <= self.freqs[i] < 0.5] self.last_spectrum = np.zeros(len(self.idx), dtype=float) self._wnd = np.hamming(self.spectr_intervals_cnt) self.may_calc = 1.0 - max_rr / buf_duration_scnds # when 90% of RR times collected in buffer - may start calculations
def test_get_items():
    """Test all the get_*_items functions.

    Covers current/first/last item access and wrapping behaviour of
    get_next_item / get_previous_item.
    """
    b = Buffer(name)
    b.append_item(name)
    assert b.get_current_item() is name
    for x in range(10):
        b.append_item(x)
    for x in range(10):
        # `is x` is genuine identity: the very object appended comes back.
        assert b.get_next_item() is x
    assert b.get_first_item() is name
    # FIX: `is 9` compared identity against an int literal, which only
    # "works" via CPython's small-int cache and is a SyntaxWarning on
    # Python 3.8+; value equality is what these checks mean.
    assert b.get_last_item() == 9
    with raises(RuntimeError):
        b.get_next_item(wrap=False)
    assert b.get_next_item() is name
    with raises(RuntimeError):
        b.get_previous_item(wrap=False)
    assert b.get_previous_item() == 9
def __init__(self, mount): super(IcyContext, self).__init__() # : Set to last value returned by :attr:`source`: self.current_source = None # Threading sync lock self.lock = threading.RLock() # Create a buffer that always returns an empty string (EOF) self.eof_buffer = Buffer(1) self.eof_buffer.close() self.mount = mount # : Deque of tuples of the format STuple(source, ITuple(user, useragent, stream_name)) self.sources = collections.deque() self.icecast_info = generate_info(mount) self.icecast = icecast.Icecast(self, self.icecast_info) self.saved_metadata = {}
def test_clear_items():
    """clear_items removes everything from the buffer."""
    buf = Buffer(name)
    for item in range(10):
        buf.append_item(item)
    buf.clear_items()
    assert buf.items == []
def __init__(self, callback):
    """Remember *callback* and register it on a freshly allocated Buffer."""
    self.callback = callback
    # Set up the receive buffer, then wire the callback into it.
    self.buffer = Buffer()
    self.buffer.addCallback(self.callback)
def test_insert_item():
    """insert_item places an item at the requested index."""
    buf = Buffer(name)
    buf.append_item(name)
    doubled = name * 2
    buf.insert_item(0, doubled)
    assert buf.items[0] == doubled
def test_create():
    """A new buffer stores its name and starts with no items."""
    buf = Buffer(name)
    assert buf.name == name
    assert buf.items == []
def test_buffer_names():
    """buffer_names() returns buffer names in insertion order."""
    container = Container()
    expected = ['first', 'second', 'third', 'fourth', 'fifth']
    for title in expected:
        container.append_buffer(Buffer(title))
    assert container.buffer_names() == expected
from Database import DataBase

# Interactive matplotlib setup for live display.
plt.rcParams["figure.figsize"] = [10, 6]
plt.ion()
fig, ax = plt.subplots()


def show_images(imgs):
    """Open one OpenCV window per image, named img0, img1, ..."""
    for i, img in enumerate(imgs):
        cv2.imshow('img{}'.format(i), img)


# Shared state for the processing pipeline.
buffer = Buffer()
frame = None  # current video frame; presumably set by the capture loop elsewhere -- confirm
cap = cv2.VideoCapture("../Speaking_Shreyan.mp4")
retvals = Queue()


def process_frame():
    """Chop the current frame, classify the people in it, and store results.

    Fans the chopped sub-images out to worker threads (process_and_upload)
    against the shared buffer, then launches a thread that inserts the
    classification results into the database.
    NOTE(review): reads the module-global `frame` and `db`; neither is set
    in this fragment -- confirm they are initialized before this runs.
    """
    imgs = chop(frame)
    buffer.reset_people()
    with ThreadPoolExecutor() as master:
        master.map(process_and_upload, imgs, repeat(buffer))
    classes, attentions = classify(buffer)
    # Ritik: pick up from here
    dbt = Thread(target=db.insert_data, args=(classes, attentions))
    dbt.start()
# ************ Define global variables and initialize ************ # num_bits = FLAGS.num_bits # number of bits in the bit_flipping environment tau = 0.95 # Polyak averaging parameter buffer_size = 1e6 # maximum number of elements in the replay buffer batch_size = 128 # number of samples to draw from the replay_buffer num_epochs = FLAGS.num_epochs # epochs to run training for num_episodes = 16 # episodes to run in the environment per epoch num_relabeled = 4 # relabeled experiences to add to replay_buffer each pass gamma = 0.98 # weighting past and future rewards # create bit flipping environment and replay buffer bit_env = BitFlipEnv(num_bits) replay_buffer = Buffer(buffer_size, batch_size) # set up Q-policy (model) and Q-target (target_model) model = Model(num_bits, scope='model', reuse=False) target_model = Model(num_bits, scope='target_model', reuse=False) update_ops_initial = update_target_graph('model', 'target_model', tau=0.0) update_ops = update_target_graph('model', 'target_model', tau=tau) sess = tf.InteractiveSession() sess.run(tf.global_variables_initializer()) # start by making Q-target and Q-policy the same updateTarget(update_ops_initial, sess) # ************ Helper functions ************ #