Example #1
def test_next_buffer():
    c = Container()
    b = Buffer('test')
    c.append_buffer(b)
    assert c.next_buffer() is b
    with raises(RuntimeError):
        c.next_buffer(wrap=False)
    assert c.next_buffer() is b
    b2 = Buffer('test2')
    c.append_buffer(b2)
    assert c.next_buffer() is b2
    with raises(RuntimeError):
        c.next_buffer(wrap=False)
    assert c.next_buffer() is b


def test_previous_buffer():
    c = Container()
    b = Buffer('test')
    b2 = Buffer('test2')
    c.append_buffer(b)
    assert c.previous_buffer() is b
    with raises(RuntimeError):
        c.previous_buffer()
    c.append_buffer(b2)
    assert c.previous_buffer() is b2
    assert c.previous_buffer() is b
    with raises(RuntimeError):
        c.previous_buffer()
Example #2
class Disassembler:
  """Class to Disassemble a decoded packet into headers+data before sending to buffers"""
  def __init__(self, callback):
    self.callback = callback
    # allocate a buffer to receive data
    self.buffer = Buffer()
    self.buffer.addCallback(self.callback)

  def disassemble(self, frame):
    """
    Disassemble received (decoded) frame to headers and data

    Parameters: frame is the raw bytes after decoding url or cookie
    headers are the first 4 bytes, data is what follows.

    should be called from main() after urlEncode.decode(). raw data,
    seqNum are then sent to Buffer.recvData() to flush it.

    we assume data is simply a string
    """

    # split to headers + data
    headers = frame[:4]
    self.retrieveHeaders(headers)

    data = frame[4:]

    # receive, reorder and flush at buffer
#    print "In disassemble: {} {}".format(data, self.buffer.buffer)
    self.buffer.recvData(data, self.seqNum)
    return data

  def retrieveHeaders(self, headers):
    """Extract 4 byte header to seqNum, sessionID, Flags"""

    seqNum, sessionID, flags = parseHeaders(headers)

    self.seqNum = seqNum

    # retrieve flags
    self.flags = flags

    # The retrieved header sets the session ID if the SYN flag is set.
    # TODO: also add a check: if the SYN flag is not set, then
    # self.sessionID should already equal headerTuple[1].

    #if self.flags & (1<<7):
    self.setSessionID(sessionID)

    # TODO: if flags = '1000' i.e. more_data, then send pull_request to
    # server for more data

  def getSessionID(self):
    """Return session ID to upper abstraction"""
    return self.sessionID

  def setSessionID(self, sessionID=0):
    """Set sessionID at client or server"""
    self.sessionID = sessionID
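
The exact 4-byte header layout is not shown in this example. A minimal sketch of what the parseHeaders() helper used above could look like, assuming (purely for illustration) a big-endian 2-byte seqNum, a 1-byte sessionID and a 1-byte flags field:

import struct

def parseHeaders(headers):
    # Hypothetical layout: '!HBB' = 2-byte seqNum, 1-byte sessionID, 1-byte flags.
    seqNum, sessionID, flags = struct.unpack('!HBB', headers)
    return seqNum, sessionID, flags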
Example #3
def test_prepend_buffer():
    c = Container()
    b = Buffer('test')
    c.prepend_buffer(b)
    assert c.buffers == [b]
    assert c.buffer is None
    b2 = Buffer('test2')
    c.prepend_buffer(b2)
    assert c.buffers == [b2, b]
Example #4
def test_insert_buffer():
    c = Container()
    b1 = Buffer('First')
    b2 = Buffer('Middle')
    b3 = Buffer('Last')
    c.append_buffer(b3)
    c.insert_buffer(0, b1)
    assert c.buffers == [b1, b3]
    c.insert_buffer(1, b2)
    assert c.buffers == [b1, b2, b3]
Example #5
def test_append_buffer():
    c = Container()
    b = Buffer('test')
    c.append_buffer(b)
    assert c.buffers == [b]
    assert c.buffer is None
    b2 = Buffer('test2')
    c.append_buffer(b2)
    assert c.buffers == [b, b2]
    assert c.buffer is None
Example #6
   def _post_draw(self):
      Buffer._post_draw(self)

      if self._sprite_texture is None:
         return

      self._sprite_texture.unbind()

      glDisable(GL_BLEND)
      glDisable(GL_POINT_SPRITE)
Example #7
def calc_metrics(rrs):
    buf_duration_scnds = 300
    tacho_buf_dur = 300
    min_rr = 0.3
    max_rr = 1.7
    sp_skip = 10
    buf_size = int(max(buf_duration_scnds, tacho_buf_dur) / min_rr) + 1

    buf = Buffer(buf_size)
    print "[+] Calculating all useful metrics: "
    si = SI(buf, buf_duration_scnds, min_rr, max_rr)
    sis = np.zeros(len(rrs))  # stress indices
    shs = np.zeros(
        (len(si.get_histogram()), len(rrs) / sp_skip + 1))  # stress histograms
    si_ready = 0

    sp = RRTachogrammEnergySpectrum(buf, tacho_buf_dur, min_rr, max_rr)
    sp_ready = 0
    sps = np.zeros((len(sp.get_spectrum()), len(rrs) / sp_skip + 1))
    ics = np.zeros(len(rrs))
    iscas = np.zeros(len(rrs))
    vbs = np.zeros(len(rrs))

    hrvs = np.zeros(len(rrs))
    cnt = -1
    ls = len(rrs)
    md = ls / 10
    for r in rrs:
        if cnt % md == 0:
            print "[+] Done {0:.2f}%".format(float(100 * cnt) / ls)
        cnt += 1
        si.update(r)
        sp.update(r)
        buf.add_sample(r)
        # ## Calculating stress indices
        si.calc_si()
        if si.is_ready():
            si_ready += 1
            sis[cnt] = si.get_stress()
            if cnt % sp_skip == 0:
                shs[:, cnt / sp_skip] = si.get_histogram()
        # ## Calculating RR-Tachogram spectrums
        if sp.is_ready():
            sp.calc_energy_spectrum()
            if cnt % sp_skip == 0:
                sps[:, cnt / sp_skip] = sp.get_spectrum()
            ics[cnt] = sp.get_IC()
            iscas[cnt] = sp.get_ISCA()
            vbs[cnt] = sp.get_vegetative_balance()
            sp_ready += 1
        # ## Fulfil heart rate buffer
    print "[+] Calculation finished: SI ready " + "{0:.2f}%".format(
        float(si_ready * 100) / cnt) + " SP ready: {0:.2f}%".format(
            float(sp_ready * 100) / cnt)
    return sis, shs, sps, ics, iscas, vbs
Example #8
def test_remove_buffer():
    c = Container()
    b = Buffer('test')
    b2 = Buffer('test2')
    c.append_buffer(b)
    c.remove_buffer(b)
    assert c.buffers == []
    c.append_buffer(b)
    c.append_buffer(b2)
    c.remove_buffer(b2)
    assert c.buffers == [b]
Example #9
class Grid:

   def __init__(self):
      self.x = None
      self.y = None
      self.nx = None
      self.ny = None

   def __call__(self, x, y, nx, ny):
      checks = [ 
            self.x != x, 
            self.y != y, 
            self.nx != nx, 
            self.ny != ny ]
      if any(checks):
         self._update(x, y, nx, ny)

      self.buffer.draw()

   def _update(self, x, y, nx, ny):
      debug(msg='updating cache').flush()
      
      self.x = x
      self.y = y
      self.nx = nx
      self.ny = ny

      dx = self.x / self.nx
      dy = self.y / self.ny

      points = [ ]
      for i in range(nx):
         points.append([i*dx, 0.0, 0.0])
         points.append([i*dx,   y, 0.0])
      points.append([x, 0.0, 0.0])
      points.append([x, y,   0.0])

      for i in range(ny):
         points.append([0.0, i*dy, 0.0])
         points.append([x,   i*dy, 0.0])
      points.append([0, y, 0.0])
      points.append([x, y, 0.0])

      print('(x={}, y={})'.format(self.x, self.y))
      print('(nx={}, ny={})'.format(self.nx, self.ny))

      print('len(points)={}'.format(len(points)))

      self.buffer = Buffer(points)
      self.buffer.renderMode('lines')
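
A hypothetical usage sketch (the numbers are made up; Buffer and debug come from the surrounding rendering module): the grid rebuilds its vertex buffer only when the requested extent or resolution changes, otherwise it just redraws the cached buffer.

grid = Grid()
grid(10.0, 5.0, 10, 5)   # first call: builds the line buffer, then draws
grid(10.0, 5.0, 10, 5)   # same parameters: cache hit, draw only
grid(20.0, 5.0, 10, 5)   # x changed: buffer is rebuilt before drawing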
Example #10
def test_length():
    b = Buffer(name)
    b.append_item(name)
    assert b.length() == 1
    for x in range(10):
        b.append_item(name)
        assert b.length() == x + 2
Example #11
    def __init__(self, env, agent, args):
        super(A2CTrainer, self).__init__()
        self.env = env
        self.agent = agent
        self.num_episodes = args.num_episodes
        self.buffer = Buffer(args.buffer_size, args.batch_size)

        assert len(
            args.solved) == 2, 'args.solved has to have length of exactly 2!'
        self.solved_r = args.solved[0]
        self.solved_ep = args.solved[1]
        self.render = args.render

        self.writer = SummaryWriter(args.tensorboard)
Example #12
    def do_SOURCE(self):
        self.useragent = self.headers.get('User-Agent', None)
        self.mount = self.path  # oh so simple
        self.stream_name = self.headers.get('ice-name', '<Unknown>')
        user, password = self._get_login()
        if (self.login(user=user, password=password)):
            if user == 'source':
                # No need for a try/except because self.login makes sure
                # we can split it.
                user, password = password.split('|')
            logger.info("source: User '%s' logged in correctly.", user)
            self.send_response(200)
            self.end_headers()
        else:
            if user == 'source':
                # We need a try/except here because we can't be certain
                # it contains a | at all.
                try:
                    user, password = password.split('|')
                except ValueError as err:
                    logger.info(
                        "source: Failed login, no separator found "
                        "from %s.", str(self.client_address))
                else:
                    logger.info("source: User '%s' failed to login from %s.",
                                user, str(self.client_address))
            self.send_response(401)
            self.end_headers()
            return

        self.audio_buffer = Buffer(max_size=MAX_BUFFER)
        self.icy_client = IcyClient(self.audio_buffer,
                                    self.mount,
                                    user=user,
                                    useragent=self.useragent,
                                    stream_name=self.stream_name)
        self.manager.register_source(self.icy_client)
        try:
            while True:
                data = self.rfile.read(4096)
                if data == '':
                    break
                self.audio_buffer.write(data)
        except:
            logger.exception("Timeout occurred (most likely)")
        finally:
            logger.info("source: User '%s' logged off.", user)
            self.manager.remove_source(self.icy_client)
Example #13
   def _update(self, x, y, nx, ny):
      debug(msg='updating cache').flush()
      
      self.x = x
      self.y = y
      self.nx = nx
      self.ny = ny

      dx = self.x / self.nx
      dy = self.y / self.ny

      points = [ ]
      for i in range(nx):
         points.append([i*dx, 0.0, 0.0])
         points.append([i*dx,   y, 0.0])
      points.append([x, 0.0, 0.0])
      points.append([x, y,   0.0])

      for i in range(ny):
         points.append([0.0, i*dy, 0.0])
         points.append([x,   i*dy, 0.0])
      points.append([0, y, 0.0])
      points.append([x, y, 0.0])

      print('(x={}, y={})'.format(self.x, self.y))
      print('(nx={}, ny={})'.format(self.nx, self.ny))

      print('len(points)={}'.format(len(points)))

      self.buffer = Buffer(points)
      self.buffer.renderMode('lines')
Example #14
def test_buffer_dict():
    c = Container()
    names = ['first', 'second', 'third', 'fourth', 'fifth', 'sixth']
    d = {name: Buffer(name) for name in names}
    for buffer in d.values():
        c.append_buffer(buffer)
    assert c.buffer_dict() == d
Example #15
    def __init__(self, buf, buf_duration_scnds=300, min_rr=0.3, max_rr=1.71):

        self.time_span = TimeSpanBuffer(
            buf, buf_duration_scnds)  # remembers last 300 sec
        buf_size = int(buf_duration_scnds / min_rr) + 1
        # energy spectrum
        self.cum_times_buf = Buffer(buf_size)
        self.time_step = 1.0 / 67  #min_rr
        self.spectr_intervals_cnt = int(
            buf_duration_scnds /
            self.time_step)  # assume 1 heart beat per second
        self.freqs = np.fft.fftfreq(self.spectr_intervals_cnt, self.time_step)
        idx = np.argsort(self.freqs)
        self.idx = [i for i in idx if 0.0 <= self.freqs[i] < 0.5]
        self.last_spectrum = np.zeros(len(self.idx), dtype=float)
        self._wnd = np.hamming(self.spectr_intervals_cnt)
        self.may_calc = 1.0 - max_rr / buf_duration_scnds  # may start calculations once (nearly) the whole RR time window has been collected in the buffer
Example #16
   def _pre_draw(self):
      Buffer._pre_draw(self)

      if self._sprite_texture is None:
         glPointSize(self._point_size)
         return

      glDepthMask(GL_FALSE)

      glEnable(GL_POINT_SMOOTH)
      glEnable(GL_BLEND)
      glBlendFunc(GL_SRC_ALPHA, GL_ONE)

      self._sprite_texture.bind()

      glPointSize(self._point_sprite_size)

      glTexEnvf(GL_POINT_SPRITE, GL_COORD_REPLACE, GL_TRUE)
      glEnable(GL_POINT_SPRITE)
Example #17
def test_format_item():
    class TestBuffer(Buffer):
        def format_item(self, item):
            """Make it possible to catch the override."""
            raise ValueError

    b = TestBuffer(name)
    with raises(ValueError):
        b.format_item(name)
    b = Buffer(name)
    assert b.format_item(name) is name
Example #18
    def __init__(self, mount):
        super(IcyContext, self).__init__()
        # : Set to last value returned by :attr:`source`:
        self.current_source = None

        # Threading sync lock
        self.lock = threading.RLock()

        # Create a buffer that always returns an empty string (EOF)
        self.eof_buffer = Buffer(1)
        self.eof_buffer.close()

        self.mount = mount
        # : Deque of tuples of the format STuple(source, ITuple(user, useragent, stream_name))
        self.sources = collections.deque()

        self.icecast_info = generate_info(mount)
        self.icecast = icecast.Icecast(self, self.icecast_info)

        self.saved_metadata = {}
Example #19
 def __init__(self, stream_id, bit_rate_in_kbps=128.0):
     Greenlet.__init__(self)
     self.stream_id = stream_id

     if not AudioStreamReader.stream_buffers.get(self.stream_id, None):
         buffer = Buffer()
         byte_rate = (bit_rate_in_kbps / 8) * 1024
         sleep_time = (buffer.chunk_byte_size * 1.0) / byte_rate
         AudioStreamReader.stream_buffers[stream_id] = [buffer, byte_rate, sleep_time]

     self.buffer, self.byte_rate, self.sleep_time = AudioStreamReader.stream_buffers[self.stream_id]
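
A quick check of the pacing arithmetic above (the 4096-byte chunk size is an assumption for illustration; the real value comes from Buffer.chunk_byte_size): at the default 128 kbps the reader ends up sleeping a quarter of a second per chunk.

byte_rate = (128.0 / 8) * 1024    # 16384 bytes per second
sleep_time = 4096 / byte_rate     # 0.25 seconds per 4096-byte chunk
print(byte_rate, sleep_time)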
Example #20
def test_remove_item():
    b = Buffer(name)
    b.append_item(name)
    assert len(b.items) == 1
    assert b.items[0] == name
    b.remove_item(name)
    assert b.items == []
Example #21
def test_prepend_item():
    b = Buffer(name)
    b.prepend_item(name)
    assert b.items[0] == name
    other = name * 2
    b.prepend_item(other)
    assert b.items[0] == other
    assert len(b.items) == 2
Example #22
def test_append_item():
    b = Buffer(name)
    assert len(b.items) == 0
    b.append_item(name)
    assert b.items[-1] is name
    assert len(b.items) == 1
    other = name * 2
    b.append_item(other)
    assert b.items[-1] is other
    assert len(b.items) == 2
Example #23
    def do_SOURCE(self):
        self.useragent = self.headers.get('User-Agent', None)
        self.mount = self.path  # oh so simple
        self.stream_name = self.headers.get('ice-name', '<Unknown>')
        user, password = self._get_login()
        if (self.login(user=user, password=password)):
            if user == 'source':
                # No need for a try/except because self.login makes sure
                # we can split it.
                user, password = password.split('|')
            logger.info("source: User '%s' logged in correctly.", user)
            self.send_response(200)
            self.end_headers()
        else:
            if user == 'source':
                # We need a try/except here because we can't be certain
                # it contains a | at all.
                try:
                    user, password = password.split('|')
                except ValueError as err:
                    logger.info("source: Failed login, no separator found "
                                "from %s.", str(self.client_address))
                else:
                    logger.info("source: User '%s' failed to login from %s.",
                                user, str(self.client_address))
            self.send_response(401)
            self.end_headers()
            return

        self.audio_buffer = Buffer(max_size=MAX_BUFFER)
        self.icy_client = IcyClient(self.audio_buffer,
                                    self.mount,
                                    user=user,
                                    useragent=self.useragent,
                                    stream_name=self.stream_name)
        self.manager.register_source(self.icy_client)
        try:
            while True:
                data = self.rfile.read(4096)
                if data == '':
                    break
                self.audio_buffer.write(data)
        except:
            logger.exception("Timeout occurred (most likely)")
        finally:
            logger.info("source: User '%s' logged off.", user)
            self.manager.remove_source(self.icy_client)
Example #24
def test_create():
    b = Buffer(name)
    assert b.name == name
    assert b.items == []
Example #25
    def __init__(self, repairmen_day, repairmen_night, batchsize, buffersizeA, buffersizeB, buffersizeC, seed, duration, silent=False):
        PriorityQueue.__init__(self)
        self.silent = silent
        self.duration = duration
        random.seed(seed)

        self.throughputs = []
        self.stats = {}
        self.machines = []

        samplesA.sort()
        samplesB.sort()
        samplesD.sort()

        bufferA12 = Buffer(self, buffersizeA)
        bufferA34 = Buffer(self, buffersizeA)
        machineA1 = MachineA(self, [bufferA12])
        machineA2 = MachineA(self, [bufferA12])
        machineA3 = MachineA(self, [bufferA34])
        machineA4 = MachineA(self, [bufferA34])
        bufferA12.providers = [machineA1, machineA2]
        bufferA34.providers = [machineA3, machineA4]

        bufferB1 = AssemblyLine(self, buffersizeB)  # Assembly line
        bufferB2 = AssemblyLine(self, buffersizeB)  # Assembly line
        machineB1 = MachineB(self, [bufferA12], [bufferB1])
        machineB2 = MachineB(self, [bufferA34], [bufferB2])
        bufferA12.receivers = [machineB1]  # Done: not crosswise
        bufferA34.receivers = [machineB2]
        bufferB1.providers = [machineB1]  # Assembly line
        bufferB2.providers = [machineB2]  # Assembly line

        bufferC1 = Buffer(self, buffersizeC)
        bufferC2 = Buffer(self, buffersizeC)
        machineC1 = MachineC(self, [bufferB1, bufferB2], [bufferC1, bufferC2], batchsize)
        machineC2 = MachineC(self, [bufferB1, bufferB2], [bufferC1, bufferC2], batchsize)
        bufferB1.receivers = [machineC1, machineC2]  # Assembly line Done: crosswise deliveries
        bufferB2.receivers = [machineC1, machineC2]  # Assembly line
        bufferC1.providers = [machineC1]
        bufferC2.providers = [machineC2]

        machineD1 = MachineD(self, [bufferC1])
        machineD2 = MachineD(self, [bufferC2])
        bufferC1.receivers = [machineD1, machineD2]
        bufferC2.receivers = [machineD1, machineD2]

        self.machines = [machineA1, machineA2, machineA3, machineA4, machineB1,
                         machineB2, machineC1, machineC2, machineD1, machineD2]

        self.available_repairmen = repairmen_day
        self.repairman_day_night_difference = repairmen_day - repairmen_night
Example #26
def test_get_items():
    """Test all the get_*_items functions."""
    b = Buffer(name)
    b.append_item(name)
    assert b.get_current_item() is name
    for x in range(10):
        b.append_item(x)
    for x in range(10):
        assert b.get_next_item() == x
    assert b.get_first_item() is name
    assert b.get_last_item() == 9
    with raises(RuntimeError):
        b.get_next_item(wrap=False)
    assert b.get_next_item() is name
    with raises(RuntimeError):
        b.get_previous_item(wrap=False)
    assert b.get_previous_item() == 9
Example #27
 def __init__(self, callback):
   self.callback = callback
   # allocate a buffer to receive data
   self.buffer = Buffer()
   self.buffer.addCallback(self.callback)
Example #28
def test_buffer_names():
    c = Container()
    names = ['first', 'second', 'third', 'fourth', 'fifth']
    for name in names:
        c.append_buffer(Buffer(name))
    assert c.buffer_names() == names
Example #29
class RRTachogrammEnergySpectrum:
    def __init__(self, buf, buf_duration_scnds=300, min_rr=0.3, max_rr=1.71):

        self.time_span = TimeSpanBuffer(
            buf, buf_duration_scnds)  # remembers last 300 sec
        buf_size = int(buf_duration_scnds / min_rr) + 1
        # energy spectrum
        self.cum_times_buf = Buffer(buf_size)
        self.time_step = 1.0 / 67  #min_rr
        self.spectr_intervals_cnt = int(
            buf_duration_scnds /
            self.time_step)  # assume 1 heart beat per second
        self.freqs = np.fft.fftfreq(self.spectr_intervals_cnt, self.time_step)
        idx = np.argsort(self.freqs)
        self.idx = [i for i in idx if 0.0 <= self.freqs[i] < 0.5]
        self.last_spectrum = np.zeros(len(self.idx), dtype=float)
        self._wnd = np.hamming(self.spectr_intervals_cnt)
        self.may_calc = 1.0 - max_rr / buf_duration_scnds  # may start calculations once (nearly) the whole RR time window has been collected in the buffer

    def _get_section_params(self, ps):
        return np.sum(ps), np.max(ps)

    def get_total_power(self):
        return self._get_section_params(self.last_spectrum)

    def get_hf_power(self):
        ids = [i for i in self.idx if 0.15 <= self.freqs[i] < 0.4]
        return self._get_section_params(self.last_spectrum[ids])

    def get_lf_power(self):
        ids = [i for i in self.idx if 0.04 <= self.freqs[i] < 0.15]
        return self._get_section_params(self.last_spectrum[ids])

    def get_vlf_power(self):
        ids = [i for i in self.idx if 0.015 <= self.freqs[i] < 0.04]
        return self._get_section_params(self.last_spectrum[ids])

    def get_ulf_power(self):
        ids = [i for i in self.idx if 0.0 <= self.freqs[i] < 0.015]
        return self._get_section_params(self.last_spectrum[ids])

    def get_IC(self):
        """
        Index of Centralization -IC (Index of centralization, IC = (VLF +LF / HF)
        IC reflects a degree of prevalence of non-respiratory sinus arrhythmia over the respiratory one.
        Actually - this is quantitative characteristic of ratio between central and independent contours
        of heart rhythm regulation.
        """
        vlf = self.get_vlf_power()[0]
        lf = self.get_lf_power()[0]
        hf = self.get_hf_power()[0]
        if hf > 0 and self.is_vlf_ready():
            return (vlf + lf) / float(hf)
        return 0

    def get_ISCA(self):
        """
        index of activation of sub cortical nervous centers ISCA (Index of Subcortical
        Centers Activity, ISCA = VLF / LF).

        ISCA characterizes activity of cardiovascular subcortical nervous center in relation
        to higher levels of management. The increased activity of sub cortical nervous centers
        is played by growth of ISCA.
        With help of this index processes the brain inhibiting effect can be supervised.
        """
        vlf = self.get_vlf_power()[0]
        lf = self.get_lf_power()[0]
        if lf > 0 and self.is_vlf_ready():
            return vlf / float(lf)
        return 0

    def get_vegetative_balance(self):
        """HF/LF is interpreted as a parameter of vegetative balance"""
        hf = self.get_hf_power()[0]
        lf = self.get_lf_power()[0]
        if lf > 0 and self.is_lf_ready():
            return hf / float(lf)
        return 0

    def get_freq(self):
        return self.freqs[self.idx]

    def get_spectrum(self):
        return self.last_spectrum

    def calc_energy_spectrum(self):

        #http://stackoverflow.com/questions/15382076/plotting-power-spectrum-in-python
        # get uniformly sampled times
        use_N_last_RRs = self.time_span.get_N()
        unitimes = np.linspace(self.cum_times_buf.samples[-use_N_last_RRs],
                               self.cum_times_buf.samples[-1],
                               self.spectr_intervals_cnt)

        # interpolate rr_ms_sqr
        uni_rr_ms_sqr = np.interp(
            unitimes,
            self.cum_times_buf.samples[-use_N_last_RRs:],
            #1.0/np.array(self.time_span.get_samples())) # energy levels over heart frequency
            np.array(self.time_span.get_samples()
                     ))  # 1.0/energy levels over 1.0/heart_frequency

        uni_rr_ms_sqr -= np.mean(uni_rr_ms_sqr)
        # calculate spectrum
        ps = np.abs(np.fft.fft(uni_rr_ms_sqr * self._wnd))**2
        #plt.plot(freqs[self.idx], ps[self.idx])
        #s = np.sum(ps[self.idx])
        #if s < self.zero_eps:
        #    s = 1.0
        self.last_spectrum = ps[self.idx]  #/ s
        #return self.last_spectrum

    def is_hf_ready(self):

        return self.time_span.buf_real_duration > 14.

    def is_lf_ready(self):

        return self.time_span.buf_real_duration > 50.

    def is_vlf_ready(self):

        return self.time_span.buf_real_duration > 132.0

    def is_ulf_ready(self):
        """
        Different buffer duration allows to perform different calculations
        ULF        Less than 0,015      More than 66 secs
        """
        return self.time_span.buf_real_duration > 300.  # standard time

    def is_ready(self):
        return self.is_vlf_ready()
        #return self.get_progress() >= self.may_calc

    def get_progress(self):
        return self.time_span.get_progress()

    def update(self, rr):
        self.time_span.update_buf_duration(rr)
        if len(self.cum_times_buf.samples) > 0:
            self.cum_times_buf.add_sample(self.cum_times_buf.samples[-1] + rr)
        else:
            self.cum_times_buf.add_sample(rr)
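
A small standalone check, for illustration only, of how the frequency grid and band masks above work out with the default parameters (buf_duration_scnds=300, time_step=1/67):

import numpy as np

time_step = 1.0 / 67
n = int(300 / time_step)                 # spectr_intervals_cnt, about 20100 samples
freqs = np.fft.fftfreq(n, time_step)     # frequency resolution is roughly 1/300 Hz
keep = [i for i in np.argsort(freqs) if 0.0 <= freqs[i] < 0.5]
lf = [i for i in keep if 0.04 <= freqs[i] < 0.15]
hf = [i for i in keep if 0.15 <= freqs[i] < 0.4]
print(len(keep), len(lf), len(hf))       # retained bins, LF bins, HF bins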
Example #30
# ************   Define global variables and initialize    ************ #

num_bits = FLAGS.num_bits  # number of bits in the bit_flipping environment
tau = 0.95  # Polyak averaging parameter
buffer_size = 1e6  # maximum number of elements in the replay buffer
batch_size = 128  # number of samples to draw from the replay_buffer

num_epochs = FLAGS.num_epochs  # epochs to run training for
num_episodes = 16  # episodes to run in the environment per epoch
num_relabeled = 4  # relabeled experiences to add to replay_buffer each pass
gamma = 0.98  # discount factor weighting future rewards

# create bit flipping environment and replay buffer
bit_env = BitFlipEnv(num_bits)
replay_buffer = Buffer(buffer_size, batch_size)

# set up Q-policy (model) and Q-target (target_model)
model = Model(num_bits, scope='model', reuse=False)
target_model = Model(num_bits, scope='target_model', reuse=False)

update_ops_initial = update_target_graph('model', 'target_model', tau=0.0)
update_ops = update_target_graph('model', 'target_model', tau=tau)

sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())

# start by making Q-target and Q-policy the same
updateTarget(update_ops_initial, sess)

# ************   Helper functions    ************ #
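
update_target_graph() itself is not shown in this snippet. A minimal sketch, assuming the TF 1.x variable-scope style the code above already uses, of what a Polyak-averaging helper with this tau convention could look like (tau=0.0 copies the online weights straight into the target, matching the initialization step above):

def update_target_graph_sketch(from_scope, to_scope, tau):
    # Assumes TF 1.x; `tf` is the same module the snippet above relies on.
    from_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=from_scope)
    to_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=to_scope)
    ops = []
    for f, t in zip(from_vars, to_vars):
        # Keep a fraction tau of the old target weights, blend in (1 - tau) of the online weights.
        ops.append(t.assign(tau * t + (1.0 - tau) * f))
    return ops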
Example #31
class A2CTrainer(TrainerBase):
    def __init__(self, env, agent, args):
        super(A2CTrainer, self).__init__()
        self.env = env
        self.agent = agent
        self.num_episodes = args.num_episodes
        self.buffer = Buffer(args.buffer_size, args.batch_size)

        assert len(
            args.solved) == 2, 'args.solved has to have length of exactly 2!'
        self.solved_r = args.solved[0]
        self.solved_ep = args.solved[1]
        self.render = args.render

        self.writer = SummaryWriter(args.tensorboard)

    def train(self):
        reward_history = []
        # For each update

        if self.render:
            self.env.render()

        # For each episode
        for episode in range(self.num_episodes):
            episode_reward = 0
            self.buffer.reset()
            state = self.env.reset()

            # While there is room in the buffer
            for i in range(self.buffer.buffer_size):
                # Get action
                action, log_probs, entropy = self.agent.get_action(state)

                # Take step
                state_, reward, done, _ = self.env.step(action)
                episode_reward += reward

                # Store transition in buffer for TD learning
                transition = [state, log_probs, entropy, reward, done]
                self.buffer.insert_transition(transition)

                # If the episode is finished (max_steps reached or from env)
                if done or i == self.buffer.buffer_size - 1:
                    print('Episode: ', episode, 'Reward: %i' % episode_reward)
                    reward_history.append(episode_reward)
                    self.writer.add_scalar("reward", episode_reward, episode)
                    self.writer.flush()

                    if len(reward_history) > self.solved_ep:
                        reward_history.pop(0)
                        if (sum(reward_history) /
                                len(reward_history)) >= self.solved_r:
                            print('Env has been solved at episode ' +
                                  str(episode) + '!')
                            self.writer.close()
                            exit()
                    break
                else:
                    state = state_

            # Get the estimated next step. If episode ended, then next value is 0, otherwise get from critic
            if self.buffer.buffer[-1][-1]:
                R = T.as_tensor([0])
            else:
                _, R = self.agent.model(state)
                R = R.detach()

            # Get transitions from buffer to train with
            transitions = self.buffer.get_buffer()
            states, log_probs, entropys, rewards, dones = self.agent.convert_to_tensors(
                transitions)

            # Calculate the discounted rewards
            returns = self.agent.calculate_returns(rewards, dones, R)
            actor_loss, critic_loss = self.agent.learn(states, log_probs,
                                                       returns, entropys)

            self.writer.add_scalar("loss/actor", actor_loss, episode)
            self.writer.add_scalar("loss/critic", critic_loss, episode)
            self.writer.flush()

        self.writer.close()
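
agent.calculate_returns() is not shown in this example. A minimal sketch, not the repository's actual implementation, of the bootstrapped discounted-return computation it is used for, given plain Python lists of per-step rewards and done flags (gamma is a hypothetical discount factor):

def discounted_returns_sketch(rewards, dones, R, gamma=0.99):
    # R is the critic's value estimate for the state after the last stored transition
    # (or 0 if that transition ended the episode), exactly as prepared above.
    returns = []
    for reward, done in zip(reversed(rewards), reversed(dones)):
        R = reward + gamma * R * (1.0 - float(done))  # reset the bootstrap at episode ends
        returns.insert(0, R)
    return returns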
Example #32
class DDQNTrainer(TrainerBase):
    def __init__(self, env, agent, args):
        super(DDQNTrainer, self).__init__()
        self.env = env
        self.agent = agent
        self.num_episodes = args.num_episodes
        self.buffer = Buffer(args.buffer_size, args.batch_size)
        self.max_steps = args.max_steps

        assert len(
            args.solved) == 2, 'args.solved has to have length of exactly 2!'
        self.solved_r = args.solved[0]
        self.solved_ep = args.solved[1]
        self.render = args.render

        self.writer = SummaryWriter(args.tensorboard)

    def train(self):
        reward_history = []
        self.buffer.reset()
        # For each update

        if self.render:
            self.env.render()

        # For each episode
        for episode in range(self.num_episodes):
            episode_reward = 0
            episode_losses = []
            state = self.env.reset()

            # Until max_steps is reached
            for i in range(self.max_steps):

                # Get action
                action = self.agent.get_action(episode, state)

                # Take step based on action
                state_, reward, done, _ = self.env.step(action)
                episode_reward += reward

                # Store transition within the buffer for batched learning
                transition = [state, action, reward, state_, done]

                self.buffer.insert_transition(transition)

                # If the buffer is full, start learning using a random sample of transitions from the buffer
                if self.buffer.is_full():
                    batch = self.buffer.sample_buffer()
                    loss = self.agent.learn(batch)
                    episode_losses.append(loss)

                # Update the target network every 100 episodes
                if episode % 100 == 0:
                    self.agent.update_target()

                # If the episode has finished (max_steps reached or from env)
                if done or i == self.max_steps - 1:
                    print('Episode: ', episode, 'Reward: %i' % episode_reward)
                    reward_history.append(episode_reward)
                    self.writer.add_scalar("reward", episode_reward, episode)
                    self.writer.flush()

                    if len(reward_history) > self.solved_ep:
                        reward_history.pop(0)
                        if (sum(reward_history) /
                                len(reward_history)) >= self.solved_r:
                            print('Env has been solved at episode ' +
                                  str(episode) + '!')
                            self.writer.close()
                            exit()
                    break
                else:
                    state = state_

            if len(episode_losses) > 0:
                self.writer.add_scalar(
                    "loss/loss",
                    sum(episode_losses) / len(episode_losses), episode)
                self.writer.flush()

        self.writer.close()
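
agent.learn(batch) is not shown either. A hedged PyTorch sketch of the double-DQN target such a step typically computes: the online network picks the greedy next action and the target network evaluates it (the network interfaces and batch layout here are assumptions):

import torch as T

def ddqn_targets_sketch(online_net, target_net, batch, gamma=0.98):
    states, actions, rewards, next_states, dones = batch
    with T.no_grad():
        # online network chooses the greedy next action ...
        next_actions = online_net(next_states).argmax(dim=1, keepdim=True)
        # ... the target network evaluates that choice
        next_q = target_net(next_states).gather(1, next_actions).squeeze(1)
        targets = rewards + gamma * next_q * (1.0 - dones.float())
    return targets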

Example #33
bit_size = FLAGS.bit_size
tau = 0.95
buffer_size = 1e6
batch_size = 128

optimisation_steps = 40
num_epochs = FLAGS.num_epochs
num_cycles = 50
num_episodes = 16
K = 4
gamma = 0.98

bit_env = Env(bit_size)
replay_buffer = Buffer(buffer_size, batch_size)

model = Model(bit_size, scope='model', reuse=False)
target_model = Model(bit_size, scope='target_model', reuse=False)

update_ops_initial = update_target_graph('model', 'target_model', tau=0.0)
update_ops = update_target_graph('model', 'target_model', tau=tau)

sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())

#making the target network same as main network
updateTarget(update_ops_initial, sess)

total_loss = []
success_rate = []
Example #34
def test_insert_item():
    b = Buffer(name)
    b.append_item(name)
    other = name * 2
    b.insert_item(0, other)
    assert b.items[0] == other
Example #35
class IcyContext(object):
    """A class that is the context of a single icecast mountpoint."""
    def __init__(self, mount):
        super(IcyContext, self).__init__()
        # : Set to last value returned by :attr:`source`:
        self.current_source = None

        # Threading sync lock
        self.lock = threading.RLock()

        # Create a buffer that always returns an empty string (EOF)
        self.eof_buffer = Buffer(1)
        self.eof_buffer.close()

        self.mount = mount
        # : Deque of tuples of the format STuple(source, ITuple(user, useragent, stream_name))
        self.sources = collections.deque()

        self.icecast_info = generate_info(mount)
        self.icecast = icecast.Icecast(self, self.icecast_info)

        self.saved_metadata = {}

    def __enter__(self):
        self.lock.acquire()

    def __exit__(self, type, value, traceback):
        self.lock.release()

    def __repr__(self):
        return "IcyContext(mount={:s}, user count={:d})".format(
            self.mount, len(self.sources))

    def append(self, source):
        """Append a source client to the list of sources for this context."""
        source_tuple = STuple(
            source.buffer,
            ITuple(source.user, source.useragent, source.stream_name))
        logger.debug("Adding source '{source:s}' from '{context:s}'".format(
            source=repr(source_tuple),
            context=repr(self),
        ))
        self.sources.append(source_tuple)
        logger.debug("Current sources are '{sources:s}'.".format(
            sources=repr(self.sources)))

    def remove(self, source):
        """Remove a source client of the list of sources for this context."""
        source_tuple = STuple(
            source.buffer,
            ITuple(source.user, source.useragent, source.stream_name))
        logger.debug("Removing source '{source:s}' from '{context:s}'".format(
            source=repr(source_tuple),
            context=repr(self),
        ))
        self.sources.remove(source_tuple)
        logger.debug("Current sources are '{sources:s}'.".format(
            sources=repr(self.sources)))
        # Close our buffer to make sure we EOF
        source_tuple.buffer.close()

    @property
    def source(self):
        """Returns the first source in the :attr:`sources` deque.
        
        If :attr:`sources` is empty it returns :const:`None` instead
        """
        try:
            source = self.sources[0]
        except IndexError:
            logger.debug("Returning EOF in source acquiring.")
            return None
        else:
            if self.current_source is not source:
                logger.info("%s: Changing source from '%s' to '%s'.",
                            self.mount, 'None' if self.current_source is None \
                                        else self.current_source.info.user,
                            source.info.user)
                # We changed source sir. Send saved metadata if any.
                if source in self.saved_metadata:
                    metadata = self.saved_metadata[source]
                    self.icecast.set_metadata(metadata)
                else:
                    # No saved metadata, send an empty one
                    self.icecast.set_metadata(u'')
            self.current_source = source
            return source.buffer

    def read(self, size=4096, timeout=None):
        """Reads at most :obj:`size`: of bytes from the first source in the
        :attr:`sources`: deque. 
        
        :obj:`timeout`: is unused in this implementation."""

        # Acquire the source once, then use that single reference everywhere else.
        # The old method was a classic example of not being thread-safe.
        source = self.source
        while source is not None:
            # Read data from the returned buffer
            data = source.read(size)
            # Refresh our source variable to point to the top source
            source = self.source

            if data == b'':
                # If we got an EOF from the read it means we should check if
                # there is another source available and continue the loop.
                continue
            else:
                # Else we can just return the data we found from the source.
                return data
        # If we got here it means `self.source` returned None and we have no
        # more sources left to read from. So we can return an EOF.
        return b''

    def start_icecast(self):
        """Calls the :class:`icecast.Icecast`: :meth:`icecast.Icecast.start`:
        method of this context."""
        self.icecast.start()

    def stop_icecast(self):
        """Calls the :class:`icecast.Icecast`: :meth:`icecast.Icecast.close`:
        method of this context."""
        self.icecast.close()
        self.current_source = None

    def send_metadata(self, metadata, client):
        """Checks if client is the currently active source on this mountpoint
        and then sends the metadata. If the client is not the active source
        the metadata is saved in case the current source drops out."""
        try:
            source = self.sources[0]
        except IndexError:
            # No source; why are we even getting metadata? Ignore it.
            # By Vin:
            # Some clients send meta slightly before connecting; save the
            # data for a second and attribute it to the next source?
            logger.warning("%s: Received metadata while we have no source.",
                           self.mount)
            return
        if (source.info.user == client.user):
            # The current source sent metadata to us! yay
            logger.info("%s:metadata.update: %s", self.mount, metadata)
            self.saved_metadata[source] = metadata
            self.icecast.set_metadata(metadata)  # Lol consistent naming (not)
        else:
            for source in self.sources:
                if (source.info.user == client.user):
                    # Save the metadata
                    logger.info("%s:metadata.save: %s", self.mount, metadata)
                    self.saved_metadata[source] = metadata
Example #36
class IcyRequestHandler(BaseHTTPRequestHandler):
    manager = manager.IcyManager()
    def _get_login(self):
        try:
            login = self.headers['Authorization'].split()[1]
        except (IndexError, KeyError):
            return (None, None)
        else:
            return login.decode("base64").split(":", 1)

    def _serve_admin(self, url, query, user, password):
        # admin is 4 and higher
        is_admin = self.manager.login(user=user, password=password, privilege=3)
        # disabled = u'disabled' if not is_admin else None
        disabled = u'disabled'
        # TODO kicking. maybe.
        send_buf = []
        send_buf.append(server_header)

        for mount in self.manager.context:
            if self.manager.context[mount].sources:  # only include if there is a source on there
                send_buf.append(mount_header.format(mount=esc(mount)))
                for i, source in enumerate(self.manager.context[mount].sources):
                    metadata = self.manager.context[mount].saved_metadata.get(source, u'')
                    send_buf.append(client_html.format(\
                        user=esc(source.info.user),
                        meta=esc(metadata),
                        agent=esc(source.info.useragent),
                        stream_name=esc(source.info.stream_name),
                        mount=esc(mount, True),
                        num=i,
                        disabled=disabled))
                send_buf.append('</table>\n')
        send_buf.append('</body>\n</html>')
        send_buf = u''.join(send_buf)
        send_buf = send_buf.encode('utf-8', 'replace')
        try:
            self.send_response(200)
            self.send_header("Content-Type", "text/html")
            self.send_header("Content-Length", len(send_buf))
            self.end_headers()

            self.wfile.write(send_buf)
        except IOError as err:
            logger.exception("Error in request handler")

    def do_SOURCE(self):
        self.useragent = self.headers.get('User-Agent', None)
        self.mount = self.path  # oh so simple
        self.stream_name = self.headers.get('ice-name', '<Unknown>')
        user, password = self._get_login()
        if (self.login(user=user, password=password)):
            if user == 'source':
                # No need for a try/except because self.login makes sure
                # we can split it.
                user, password = password.split('|')
            logger.info("source: User '%s' logged in correctly.", user)
            self.send_response(200)
            self.end_headers()
        else:
            if user == 'source':
                # We need a try/except here because we can't be certain
                # it contains a | at all.
                try:
                    user, password = password.split('|')
                except ValueError as err:
                    logger.info("source: Failed login, no separator found "
                                "from %s.", str(self.client_address))
                else:
                    logger.info("source: User '%s' failed to login from %s.",
                                user, str(self.client_address))
            self.send_response(401)
            self.end_headers()
            return

        self.audio_buffer = Buffer(max_size=MAX_BUFFER)
        self.icy_client = IcyClient(self.audio_buffer,
                                    self.mount,
                                    user=user,
                                    useragent=self.useragent,
                                    stream_name=self.stream_name)
        self.manager.register_source(self.icy_client)
        try:
            while True:
                data = self.rfile.read(4096)
                if data == '':
                    break
                self.audio_buffer.write(data)
        except:
            logger.exception("Timeout occurred (most likely)")
        finally:
            logger.info("source: User '%s' logged off.", user)
            self.manager.remove_source(self.icy_client)

    def do_GET(self):
        self.useragent = self.headers.get('User-Agent', None)
        parsed_url = urlparse.urlparse(self.path)
        parsed_query = urlparse.parse_qs(parsed_url.query)
        user, password = self._get_login()
        if user is None and password is None:
            if 'pass' in parsed_query:
                try:
                    user, password = parsed_query['pass'][0].split('|', 1)
                except (ValueError, IndexError, KeyError):
                    user, password = (None, None)
        if (self.login(user=user, password=password)):
            # Since the user and password are raw at this point we fix them up
            # If user is 'source' it means the actual user is still in the
            # password field.
            if user == 'source':
                # No need to try; except because the self.login makes sure
                # we can split it.
                user, password = password.split('|')
            if parsed_url.path == "/proxy":
                self._serve_admin(parsed_url, parsed_query, user, password)
            elif parsed_url.path == "/admin/metadata":
                try:
                    mount = parsed_query['mount'][0]
                except (KeyError, IndexError):
                    mount = ''
                self.client = IcyClient(None, mount,
                                        user, self.useragent, None)

                song = parsed_query.get('song', None)
                encoding = parsed_query.get('charset', ['latin1'])
                if song is not None:
                    metadata = fix_encoding(song[0], encoding[0])
                    self.manager.send_metadata(metadata=metadata, client=self.client)

                # Send a response... although most clients just ignore this.
                try:
                    self.send_response(200)
                    self.send_header("Content-Type", "text/xml")
                    self.send_header("Content-Length", "113")
                    self.end_headers()

                    self.wfile.write('<?xml version="1.0"?>\n<iceresponse><message>Metadata update successful</message><return>1</return></iceresponse>')
                except IOError as err:
                    if hasattr(err, 'errno') and err.errno == 32:
                        #logger.warning("Broken pipe exception, ignoring")
                        pass
                    else:
                        logger.exception("Error in request handler")
            elif parsed_url.path == "/admin/listclients":
                auth = "{:s}:{:s}".format('source', config.icecast_pass)
                auth = auth.encode('base64')
                url = urlparse.urlparse('http://{:s}:{:d}/'.format(
                                                                   config.icecast_host,
                                                                   config.icecast_port)
                                        )
                url = url[:2] + parsed_url[2:]
                url = urlparse.urlunparse(url)

                request = urllib2.Request(url)
                request.add_header('User-Agent', self.useragent)
                request.add_header('Authorization', 'Basic {:s}'.format(auth))

                try:
                    result = urllib2.urlopen(request).read()
                except urllib2.HTTPError as err:
                    self.send_response(err.code)
                    self.end_headers()
                    return
                except urllib2.URLError as err:
                    self.send_response(501)
                    self.end_headers()
                    return

                result_length = len(result)

                self.send_response(200)
                self.send_header('Content-Type', 'text/xml')
                self.send_header('Content-Length', str(result_length))
                self.end_headers()

                self.wfile.write(result)
        else:
Example #37
def test_clear_items():
    b = Buffer(name)
    for x in range(10):
        b.append_item(x)
    b.clear_items()
    assert b.items == []
Example #38
# Imports beyond DataBase are not shown in the original snippet; these are the
# standard-library / third-party ones the code below needs (project-specific names
# like chop, classify, process_and_upload, Buffer and db come from elsewhere).
import cv2
import matplotlib.pyplot as plt
from concurrent.futures import ThreadPoolExecutor
from itertools import repeat
from queue import Queue
from threading import Thread

from Database import DataBase

plt.rcParams["figure.figsize"] = [10, 6]

plt.ion()

fig, ax = plt.subplots()



def show_images(imgs):
    for i, img in enumerate(imgs):
        cv2.imshow('img{}'.format(i), img)


buffer = Buffer()
frame = None
cap = cv2.VideoCapture("../Speaking_Shreyan.mp4")

retvals = Queue()


def process_frame():
    imgs = chop(frame)
    buffer.reset_people()
    with ThreadPoolExecutor() as master:
        master.map(process_and_upload, imgs, repeat(buffer))
    classes, attentions = classify(buffer)
    # RITIK IDHAR SE UTHA
    dbt = Thread(target=db.insert_data, args=(classes, attentions))
    dbt.start()
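
The capture loop that drives process_frame() is not part of this snippet. A hedged sketch of what it might look like (process_frame() reads the module-level frame, so the loop assigns it at module level too):

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:                 # end of the video file
        break
    process_frame()
cap.release()
cv2.destroyAllWindows()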
Example #39
   def __init__(self, vertex_data=None, color_data=None):
      Buffer.__init__(self, vertex_data, color_data)

      self._sprite_texture = None
      self._point_size = 1.0
      self._point_sprite_size = 15.0