Example #1
    def run(self):
        logger.info('starting horizon agent')
        listen_queue = Queue(maxsize=settings.MAX_QUEUE_SIZE)
        pid = getpid()

        # Start the workers
        for i in range(settings.WORKER_PROCESSES):
            Worker(listen_queue, pid).start()

        # Start the listeners
        Listen(settings.PICKLE_PORT, listen_queue, pid, type="pickle").start()
        Listen(settings.UDP_PORT, listen_queue, pid, type="udp").start()

        # Start the roomba
        Roomba(pid).start()

        # Warn the Mac users
        try:
            listen_queue.qsize()
        except NotImplementedError:
            logger.info(
                'WARNING: Queue().qsize() not implemented on Unix platforms like Mac OS X. Queue size logging will be unavailable.'
            )

        # Keep yourself occupied, sucka
        while 1:
            time.sleep(100)
Example #2
    def run(self):
        logger.info('starting horizon agent')
        listen_queue = Queue(maxsize=settings.MAX_QUEUE_SIZE)
        pid = getpid()

        # If we're not using oculus, don't bother writing to mini
        try:
            skip_mini = True if settings.OCULUS_HOST == '' else False
        except Exception:
            skip_mini = True

        # Start the workers
        for i in range(settings.WORKER_PROCESSES):
            if i == 0:
                Worker(listen_queue, pid, skip_mini, canary=True).start()
            else:
                Worker(listen_queue, pid, skip_mini).start()

        # Start the listeners
        Listen(settings.PICKLE_PORT, listen_queue, pid, type="pickle").start()
        Listen(settings.UDP_PORT, listen_queue, pid, type="udp").start()

        # Start the roomba
        Roomba(pid, skip_mini).start()

        # Warn the Mac users
        try:
            listen_queue.qsize()
        except NotImplementedError:
            logger.info('WARNING: Queue().qsize() not implemented on Unix platforms like Mac OS X. Queue size logging will be unavailable.')

        # Keep yourself occupied, sucka
        while 1:
            time.sleep(100)
Example #3
 def connectListen(self):  # Connect the listener thread
     self.__listen = Listen(self.netcard_cb.currentText(),
                            self.filter_line.text(), self.getData,
                            self.showError)  # Create the listener thread
     self.stop_bt.clicked.connect(self.__listen.stopListen)  # Connect the related slots
     self.overview_tb.clicked.connect(self.showTreeAndHex)
     self.preInit()  # Initialize
     self.__listen.start()  # Start the listener thread
Example #4
def processing():
    data = json.loads(request.data)
    if 'type' not in data:
        return 'ok'
    if data['type'] == 'confirmation':
        return callback_confirmation_token
    elif data['type'] == 'message_new':
        l1 = Listen()
        l1.choose()
        return 'ok'
    return 'ok'
Example #5
    def run(self):
        """
        Determine the `MAX_QUEUE_SIZE` for the listen process.

        Determine if horizon should populate the mini redis store for Oculus.

        Start the defined number of `WORKER_PROCESSES`, with the first worker
        populating the canary metric.

        Start the pickle (and UDP) listen processes.

        Start roomba.
        """
        logger.info('agent starting skyline %s' % skyline_app)
        listen_queue = Queue(maxsize=settings.MAX_QUEUE_SIZE)
        pid = getpid()

        # If we're not using oculus, don't bother writing to mini
        try:
            skip_mini = True if settings.OCULUS_HOST == '' else False
        except Exception:
            skip_mini = True

        # Start the workers
        for i in range(settings.WORKER_PROCESSES):
            if i == 0:
                logger.info('%s :: starting Worker - canary' % skyline_app)
                Worker(listen_queue, pid, skip_mini, canary=True).start()
            else:
                logger.info('%s :: starting Worker' % skyline_app)
                Worker(listen_queue, pid, skip_mini).start()

        # Start the listeners
        logger.info('%s :: starting Listen - pickle' % skyline_app)
        Listen(settings.PICKLE_PORT, listen_queue, pid, type="pickle").start()
        logger.info('%s :: starting Listen - udp' % skyline_app)
        Listen(settings.UDP_PORT, listen_queue, pid, type="udp").start()

        # Start the roomba
        logger.info('%s :: starting Roomba' % skyline_app)
        Roomba(pid, skip_mini).start()

        # Warn the Mac users
        try:
            listen_queue.qsize()
        except NotImplementedError:
            logger.info(
                'WARNING: Queue().qsize() not implemented on Unix platforms like Mac OS X. Queue size logging will be unavailable.'
            )

        # Keep yourself occupied, sucka
        while 1:
            time.sleep(100)
Example #6
 def _create_test_data(self):
     test_data = [
         Listen().from_json(ujson.loads(jdata))
         for jdata in TEST_LISTEN_JSON
     ]
     self.logstore.insert(test_data)
     return len(test_data)
Example #7
    def fetch_listens_from_storage(self, user_name, from_ts, to_ts, limit,
                                   order):
        """ The timestamps are stored as UTC in the postgres datebase while on retrieving
            the value they are converted to the local server's timezone. So to compare
            datetime object we need to create a object in the same timezone as the server.

            from_ts: seconds since epoch, in float
            to_ts: seconds since epoch, in float
        """

        # Quote single quote characters which could be used to mount an injection attack.
        # Sadly, influxdb does not provide a means to do this in the client library
        query = 'SELECT * FROM "\\"' + escape(user_name) + '\\""'

        # InfluxDB timestamps are in nanoseconds; the nine appended zeros convert epoch seconds.
        if from_ts is not None:
            query += " WHERE time > " + str(from_ts) + "000000000"
        else:
            query += " WHERE time < " + str(to_ts) + "000000000"

        query += " ORDER BY time " + ORDER_TEXT[order] + " LIMIT " + str(limit)
        try:
            results = self.influx.query(query)
        except Exception as e:
            self.log.error("Cannot query influx: %s" % str(e))
            return []

        listens = []
        for result in results.get_points(
                measurement=get_measurement_name(user_name)):
            listens.append(Listen.from_influx(result))

        if order == ORDER_ASC:
            listens.reverse()

        return listens
Example #8
def generate_data(from_date, num_records):
    test_data = []
    current_date = to_epoch(from_date)
    artist_msid = str(uuid.uuid4())

    user = db.user.get_by_mb_id("test")
    if not user:
        db.user.create("test")
        user = db.user.get_by_mb_id("test")

    for i in range(num_records):
        current_date += 1  # Add one second
        item = Listen(
            user_id=user['id'],
            user_name='testuserplsignore',
            timestamp=datetime.utcfromtimestamp(current_date),
            artist_msid=artist_msid,
            recording_msid=str(uuid.uuid4()),
            release_msid=str(uuid.uuid4()),
            data={
                'artist_name': 'Test Artist Pls ignore',
                'track_name': 'Hello Goodbye',
                'additional_info': {},
            },
        )
        test_data.append(item)
    return test_data
Example #9
 def GET(self,):
     ip = web.input()['ip']
     info = Listen.get_lastest_info(ip)
     if info is None:
         return None
     info = list(eval(info))  # eval() on stored data; trusted input is assumed
     info[2] = list(info[2])
     return info
Example #10
 def convert_row(self, row):
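     # Positional row-to-Listen mapping; the column order here is assumed from the backing table's schema.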
     return Listen(user_id=row[1],
                   user_name=row[2],
                   timestamp=row[3],
                   artist_msid=row[4],
                   album_msid=row[5],
                   recording_msid=row[6],
                   data=row[7])
Example #11
 def get_playing_now(self, user_id):
     """ Return the current playing song of the user """
     data = self.redis.get('playing_now' + ':' + str(user_id))
     if not data:
         return None
     data = ujson.loads(data)
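     # Assumed intent: stamp the entry with a placeholder timestamp, since a "playing now" listen has no real listened_at.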
     data.update({'listened_at': MIN_ID + 1})
     return Listen.from_json(data)
Example #12
    def write(self, listen_dicts):
        t0 = time()
        listens = []
        for listen in listen_dicts:
            listens.append(Listen().from_json(listen))
        self.ls.insert(listens)
        self.time += time() - t0

        return True
Example #13
    def lisen_name(cls):  # Waits for the assistant's name; returns the command

        source_cmd = None
        if not cls._resume:  # The user has not said the name before
            voice = Listen.listen_voice(None, 2)
            voice = str(cls.recognize_name(voice))  # Recognize the name
            if voice == 'alias':
                Say.speak("Слушаю")
                source_cmd = Listen.listen_voice(None, 10)  # Capture the command
                cls._resume = True
            else:
                print("Не распознанно")
        else:  # The user has already said the name
            source_cmd = Listen.listen_voice(None, 10)  # Capture the command
            if source_cmd in ("err_tr", "err"):  # reset on a recognition error
                cls._resume = False

        return source_cmd
Example #14
def generate_data(test_user_id, from_ts, num_records):
    test_data = []
    artist_msid = str(uuid.uuid4())

    for i in range(num_records):
        from_ts += 1  # Add one second
        item = Listen(user_id=test_user_id,
                      timestamp=datetime.utcfromtimestamp(from_ts),
                      artist_msid=artist_msid,
                      recording_msid=str(uuid.uuid4()))
        test_data.append(item)
    return test_data
Example #15
def generate_data(from_date, num_records):
    test_data = []
    current_date = to_epoch(from_date)
    artist_msid = str(uuid.uuid4())

    user = db.user.get_by_mb_id("test")
    if not user:
        db.user.create("test")
        user = db.user.get_by_mb_id("test")

    for i in range(num_records):
        current_date += 1   # Add one second
        item = Listen(user_id=user['id'], timestamp=datetime.utcfromtimestamp(current_date), artist_msid=artist_msid,
                      recording_msid=str(uuid.uuid4()))
        test_data.append(item)
    return test_data
Example #16
    def execute_cmd(cls, cmd):
        try:
            random_respond = random.randint(
                0,
                len(list(cls.respond[cmd['cmd']])) - 1)
        except KeyError:
            random_respond = 0  # no canned responses for this command; avoid a NameError below

        if cmd['cmd'] == 'cnotecreate':
            Say.speak("Записываю дарагой")
            note = Listen.listen_voice(None, 10)
            try:
                with open('note.txt', 'a') as f:
                    f.write(note + '\n')
                Say.speak("написал туда сюда")
            except TypeError:
                Say.speak("Ты говори говори")
                print("[err] TypeError")
            except IOError:
                Say.speak("Ты че мне мозги делаешь какой файл э")
                print("[err] Не получилось открыть файл")

        elif cmd['cmd'] == 'cnoteopen':
            try:
                os.startfile("note.txt")
                Say.speak(cls.respond[cmd['cmd']][random_respond])
            except FileNotFoundError:
                print("[err] Не удаётся найти файл: " + cmd['rest'])
                Say.speak("Не удалось найти файл")

        elif cmd['cmd'] == 'cnoteread':
            notes = []
            with open('note.txt', 'r') as f:
                for line in f:
                    if line.strip():  # skip blank lines
                        notes.append(line)

            if len(notes) != 0:
                Say.speak("У вас есть " + str(len(notes)) + " заметки")
                j = 1
                for i in notes:
                    Say.speak(str(j) + " Заметка " + i)
                    j += 1

            else:
                Say.speak("Ля какие заметки")
Example #17
    def fetch_listens_from_storage(self, user_name, from_ts, to_ts, limit,
                                   order):
        """ The timestamps are stored as UTC in the postgres datebase while on retrieving
            the value they are converted to the local server's timezone. So to compare
            datetime object we need to create a object in the same timezone as the server.

            from_ts: seconds since epoch, in float
            to_ts: seconds since epoch, in float
        """

        # Quote single quote characters which could be used to mount an injection attack.
        # Sadly, influxdb does not provide a means to do this in the client library
        user_name = user_name.replace("'", "\\'")

        query = """SELECT *
                     FROM listen
                    WHERE user_name = '""" + user_name + "' "
        if from_ts is not None:
            query += "AND time > " + str(from_ts) + "000000000"
        else:
            query += "AND time < " + str(to_ts) + "000000000"

        query += " ORDER BY time " + ORDER_TEXT[order] + " LIMIT " + str(limit)
        try:
            results = self.influx.query(query)
        except Exception as e:
            self.log.error("Cannot query influx: %s" % str(e))
            return []

        listens = []
        for result in results.get_points(measurement='listen'):
            dt = datetime.strptime(result['time'], "%Y-%m-%dT%H:%M:%SZ")
            t = int(dt.strftime('%s'))
            mbids = []
            for id in result.get('artist_mbids', '').split(","):
                if id:
                    mbids.append(id)
            tags = []
            for tag in result.get('tags', '').split(","):
                if tag:
                    tags.append(tag)

            data = {
                'artist_mbids': mbids,
                'album_msid': result.get('album_msid', ''),
                'album_mbid': result.get('album_mbid', ''),
                'album_name': result.get('album_name', ''),
                'recording_mbid': result.get('recording_mbid', ''),
                'tags': tags
            }
            l = Listen(timestamp=t,
                       user_name=result.get('user_name', '<unknown>'),
                       artist_msid=result.get('artist_msid', ''),
                       recording_msid=result.get('recording_msid', ''),
                       data={
                           'additional_info': data,
                           'artist_name': result.get('artist_name', ''),
                           'track_name': result.get('track_name', '')
                       })
            listens.append(l)

        if order == ORDER_ASC:
            listens.reverse()

        return listens
Example #18
 def GET(self,):
     ip = web.input()['ip']
     info = Listen.get_all_info(ip)
     if info is None:
         return None
     return info
Example #19
class HomeForm(QMainWindow, Ui_Form):
    protocolMap = {"1": "ICMP", "6": "TCP", "17": "UDP"}  # Protocol dictionary

    def __init__(self, parent=None):  # Main window initialization
        super().__init__(parent)
        self.setupUi(self)
        self.showNetcard(self.netcard_cb)
        self.listen_bt.clicked.connect(self.connectListen)

    def showNetcard(self, netcard_cb):  # Show all network interfaces
        netcards = pcapy.findalldevs()  # Look up the devices
        netcard_cb.addItems(netcards)  # Display them

    def preInit(self):  # Initialization before listening
        self.__resList = []
        self.__cnt = {"TCP": 0, "UDP": 0, "ICMP": 0}
        self.overview_tb.setRowCount(0)  # Clear the overview table
        self.protocol_tree.clear()
        self.code_txt.clear()
        self.statistic_txt.clear()

    def showError(self):  # Alert that the filter rule is invalid
        QMessageBox.information(QWidget(), "提示", "输入的过滤规则格式有误!")

    def connectListen(self):  # Connect the listener thread
        self.__listen = Listen(self.netcard_cb.currentText(),
                               self.filter_line.text(), self.getData,
                               self.showError)  # Create the listener thread
        self.stop_bt.clicked.connect(self.__listen.stopListen)  # Connect the related slots
        self.overview_tb.clicked.connect(self.showTreeAndHex)
        self.preInit()  # Initialize
        self.__listen.start()  # Start the listener thread

    def getData(self):  # Get a captured packet
        res = self.__listen.getData()
        if res is not None:  # If the parsed result is non-empty, show it in the main window
            targetRow = self.overview_tb.rowCount()
            self.overview_tb.insertRow(targetRow)
            targetData = [
                res["time"], res["ipSource"], res["ipTarget"],
                self.protocolMap[res["ipProtocol"]], res["dataLength"],
                res["information"]
            ]
            for column in range(self.overview_tb.columnCount()):
                item = QTableWidgetItem(targetData[column])
                item.setTextAlignment(Qt.AlignHCenter
                                      | Qt.AlignVCenter)  # Center the text
                self.overview_tb.setItem(targetRow, column, item)
            self.__resList.append(res)
            upperProtocol = self.protocolMap[res["ipProtocol"]]
            self.__cnt[upperProtocol] = self.__cnt[upperProtocol] + 1  # Update the statistics
            self.showStatistic()

    def getSelectedRow(self):  # Get the selected row number
        items = self.overview_tb.selectedItems()
        return items[0].row()

    def showTreeAndHex(self):  # Show the protocol tree and the hex dump
        self.showTree()
        self.showHex()

    def showTree(self):  # Show the protocol tree
        selectedRow = self.getSelectedRow()
        res = self.__resList[selectedRow]
        self.protocol_tree.clear()  # Clear it first

        rootFrame = QTreeWidgetItem(self.protocol_tree)  # Show the packet's summary info
        rootFrame.setText(
            0, "Frame %s: capture %s bytes totally" %
            (str(selectedRow + 1), res["dataLength"]))
        childFrame = QTreeWidgetItem(rootFrame)
        childFrame.setText(0, "Capture time: " + res["time"])

        rootEther = QTreeWidgetItem(self.protocol_tree)  # Show the Ethernet header info
        rootEther.setText(
            0, "Ethernet II, Source: (%s), Target: (%s)" %
            (res["macSource"], res["macTarget"]))
        childEther = QTreeWidgetItem(rootEther)
        childEther.setText(
            0, "Target MAC: (%s)\nSource MAC: (%s)\nType: IP(0x%04x)" %
            (res["macTarget"], res["macSource"], int(res["etherProtocol"])))

        rootInter = QTreeWidgetItem(self.protocol_tree)  # Show the IP header info
        rootInter.setText(
            0, "Internet protocol, Source: (%s), Target: (%s)" %
            (res["ipSource"], res["ipTarget"]))
        childInter1 = QTreeWidgetItem(rootInter)
        childInter1.setText(
            0,
            "IP version: %s\nIP head length: %s bytes\nIP service type: 0x%02x\nIP total length: %s\nIP identification: %s\nIP flags: 0x%02x"
            % (res["ipVersion"], res["ipHeadLen"], int(res["ipServiceType"]),
               res["ipTotalLen"], res["ipIdenti"], int(res["ipFlag"])))
        childInter2 = QTreeWidgetItem(rootInter)
        childInter2.setText(
            0,
            "IP fragment offset: %s\nIP TTL: %s\nProtocol: %s(0x%02x)\nIP head checksum: %s\nIP source: (%s)\nIP target: (%s)"
            % (res["ipOffset"], res["ipTTL"],
               self.protocolMap[res["ipProtocol"]], int(res["ipProtocol"]),
               res["ipCheckSum"], res["ipSource"], res["ipTarget"]))

        if res["ipProtocol"] == "6":  #显示TCP头部信息
            rootTrans = QTreeWidgetItem(self.protocol_tree)
            rootTrans.setText(
                0,
                "Transmission control protocol, Source port: (%s), Target port: (%s)"
                % (res["tcpSourcePort"], res["tcpTargetPort"]))
            childTrans1 = QTreeWidgetItem(rootTrans)
            childTrans1.setText(
                0,
                "TCP source port: %s\nTCP target port: %s\nTCP sequence number: %s\nTCP acknowledge number: %s"
                % (res["tcpSourcePort"], res["tcpTargetPort"],
                   res["tcpSequence"], res["tcpAck"]))
            childTrans2 = QTreeWidgetItem(rootTrans)
            childTrans2.setText(
                0,
                "TCP head length: %s bytes\nTCP flags: 0x%02x\nTCP window size: %s\nTCP checksum: %s\nTCP urgent pointer: %s"
                % (res["tcpHeadLen"], int(res["tcpFlags"]),
                   res["tcpWindowSize"], res["tcpCheckSum"], res["tcpUrgent"]))

        elif res["ipProtocol"] == "17":  #显示UDP头部信息
            rootTrans = QTreeWidgetItem(self.protocol_tree)
            rootTrans.setText(
                0,
                "User datagram protocol, Source port: (%s), Target port: (%s)"
                % (res["udpSourcePort"], res["udpTargetPort"]))
            childTrans = QTreeWidgetItem(rootTrans)
            childTrans.setText(
                0,
                "UDP source port: %s\nUDP target port: %s\nUDP data length: %s bytes\nUDP checksum: %s"
                % (res["udpSourcePort"], res["udpTargetPort"],
                   res["udpDataLen"], res["udpCheckSum"]))

        elif res["ipProtocol"] == "1":  #显示ICMP头部信息
            rootTrans = QTreeWidgetItem(self.protocol_tree)
            rootTrans.setText(
                0,
                "Internet control management protocol, Type: (%s), Code: (%s)"
                % (res["icmpType"], res["icmpCode"]))
            childTrans = QTreeWidgetItem(rootTrans)
            childTrans.setText(
                0,
                "ICMP type: %s\nICMP code: %s\nICMP checksum: %s\nICMP identification: %s\nICMP sequence number: %s"
                % (res["icmpType"], res["icmpCode"], res["icmpCheckSum"],
                   res["icmpIdenti"], res["icmpSequence"]))

    def showHex(self):  # Show the hex dump
        selectedRow = self.getSelectedRow()
        hex = self.__resList[selectedRow]["originalHex"]
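        # Inserts a space between each pair of hex digits (matches positions with an even number of characters remaining).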
        hexString = re.sub(r"(?<=\w)(?=(?:\w\w)+$)", " ", hex)
        hexUnicode = ' '.join([
            chr(int(i, 16))
            for i in [hex[j:j + 2] for j in range(0, len(hex), 2)]
        ])

        self.code_txt.clear()  # Clear it first
        self.code_txt.appendPlainText("Data hex string:\n" + hexString + "\n")
        self.code_txt.appendPlainText("Data unicode:\n" + hexUnicode + "\n")

    def showStatistic(self):  # Show the statistics
        self.statistic_txt.clear()  # Clear it first
        tmp = ["TCP", "UDP", "ICMP"]
        for i in range(3):
            self.statistic_txt.appendPlainText(tmp[i] + " total packets: " +
                                               str(self.__cnt[tmp[i]]))
Example #20
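# Note: this fragment starts mid-function; it appears to be the tail of a helper that
# right-pads a batch of token tensors `txt` with zeros up to a fixed length of 40.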
    diff = 40 - txt.size(1)
    z = torch.zeros((batch_size, diff)).long()
    z = z.to(device)
    return torch.cat((txt, z), dim=1)


if __name__ == '__main__':
    root_dir = '../data/'
    dev_dir = 'dev/'
    model_path = 'syncnet_v2.model'
    dataset = LRWDataset(root_dir, dev_dir + 'dev.csv', is_dev=True)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    batch_size = 2

    watch_net = Watch.WatchNet(root_dir, model_path, device)
    listen_net = Listen.ListenNet(device)
    spell_net = Spell.SpellNet(SPELL_LAYERS, SPELL_HIDDEN, SPELL_OUTPUT, device)

    # listen_model = listen.get_model()
    # spell_model = spell.get_model()

    dataloader = DataLoader(dataset,
                            collate_fn=collate_data_streams,
                            batch_size=batch_size,
                            drop_last=True)

    watch_param = watch_net.parameters()
    listen_param = listen_net.parameters()
    spell_param = spell_net.parameters()

    tot_param = list(watch_param) + list(listen_param) + list(spell_param)
Example #21
    def run(self):
        """
        Determine the `MAX_QUEUE_SIZE` for the listen process.

        Determine if horizon should populate the mini redis store for Oculus.

        Start the defined number of `WORKER_PROCESSES`, with the first worker
        populating the canary metric.

        Start the pickle (and UDP) listen processes.

        Start roomba.
        """
        logger.info('agent starting skyline %s' % skyline_app)
        listen_queue = Queue(maxsize=settings.MAX_QUEUE_SIZE)
        pid = getpid()

        # If we're not using oculus, don't bother writing to mini
        try:
            skip_mini = True if settings.OCULUS_HOST == '' else False
        except Exception:
            skip_mini = True

        # Start the workers
        for i in range(settings.WORKER_PROCESSES):
            if i == 0:
                # @modified 20201017 - Feature #3788: snab_flux_load_test
                #                      Feature #3680: horizon.worker.datapoints_sent_to_redis
                # Added worker_number
                logger.info(
                    '%s :: starting Worker - canary, worker number %s' %
                    (skyline_app, str(i)))
                Worker(listen_queue,
                       pid,
                       skip_mini,
                       worker_number=i,
                       canary=True).start()
            else:
                logger.info('%s :: starting Worker, worker number %s' %
                            (skyline_app, str(i)))
                Worker(listen_queue,
                       pid,
                       skip_mini,
                       worker_number=i,
                       canary=False).start()

        # Start the listeners
        logger.info('%s :: starting Listen - pickle' % skyline_app)
        Listen(settings.PICKLE_PORT, listen_queue, pid, type="pickle").start()
        logger.info('%s :: starting Listen - udp' % skyline_app)
        Listen(settings.UDP_PORT, listen_queue, pid, type="udp").start()

        # @added 20201122 - Feature #3820: HORIZON_SHARDS
        # Add an additional listen process on a different port for the shard
        if HORIZON_SHARDS:
            if HORIZON_SHARD_PICKLE_PORT:
                logger.info(
                    '%s :: starting Listen - pickle for horizon shard on port %s'
                    % (skyline_app, str(settings.HORIZON_SHARD_PICKLE_PORT)))
                try:
                    Listen(settings.HORIZON_SHARD_PICKLE_PORT,
                           listen_queue,
                           pid,
                           type="pickle").start()
                except Exception as e:
                    logger.error(
                        'error :: agent.py failed to start Listen for horizon shard - %s'
                        % str(e))
            else:
                logger.error(
                    'error :: agent.py could not start the horizon shard Listen process as no port is set'
                )

        # Start the roomba
        logger.info('%s :: starting Roomba' % skyline_app)
        Roomba(pid, skip_mini).start()

        # Warn the Mac users
        try:
            listen_queue.qsize()
        except NotImplementedError:
            logger.info(
                'WARNING: Queue().qsize() not implemented on Unix platforms like Mac OS X. Queue size logging will be unavailable.'
            )

        # Keep yourself occupied, sucka
        while 1:
            time.sleep(100)
Example #22
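# Note: this fragment starts mid-function; the opening of the message-sending handler is elided.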
        else:
            return "error in sending message", 500
    else:
        return "incorrect parameters", 405


@app.route("/")
def template():
    return render_template("index.html")


@app.route("/listen", methods=['GET'])
def listen():
    """
    Requests the listened messages from the backend
    Returns list of listened messages
    """
    listen_list = listen_object.get_list()
    listen_object.reset_list()
    return jsonify(listen_list), 200


if __name__ == '__main__':
    """
    Creates an object of the listen class and starts the listen thread
    Runs the app
    """
    listen_object = Listen([])
    listen_object.start_thread()
    app.run()
Example #23
    def write(self, listen_dicts):

        submit = []
        unique = []

        # Calculate the time range that this set of listens covers
        min_time = 0
        max_time = 0
        user_name = ""
        for listen in listen_dicts:
            t = int(listen['listened_at'])
            if not max_time:
                min_time = max_time = t
                user_name = listen['user_name']
                continue

            if t > max_time:
                max_time = t

            if t < min_time:
                min_time = t

        # Quote single quote characters which could be used to mount an injection attack.
        # Sadly, influxdb does not provide a means to do this in the client library
        user_name = user_name.replace("'", "\\'")

        # querying for artist name here, since a field must be included in the query.
        query = """SELECT time, artist_name
                     FROM listen
                    WHERE user_name = '%s'
                      AND time >= %d000000000
                      AND time <= %d000000000
                """ % (user_name, min_time, max_time)
        while True:
            try:
                results = self.influx.query(query)  # assuming the store's InfluxDB client handle
                break
            except Exception as e:
                self.log.error("Cannot query influx: %s" % str(e))
                sleep(3)

        # collect all the timestamps for this given time range.
        timestamps = {}
        for result in results.get_points(measurement='listen'):
            dt = datetime.strptime(result['time'], "%Y-%m-%dT%H:%M:%SZ")
            timestamps[int(dt.strftime('%s'))] = 1

        duplicate_count = 0
        unique_count = 0
        for listen in listen_dicts:
            # Check to see if the timestamp is already in the DB
            t = int(listen['listened_at'])
            if t in timestamps:
                duplicate_count += 1
                continue

            unique_count += 1
            submit.append(Listen().from_json(listen))
            unique.append(listen)

        self.log.error("dups: %d, unique %d" % (duplicate_count, unique_count))
        if not unique_count:
            return True

        try:
            t0 = time()
            self.ls.insert(submit)
            self.time += time() - t0
        except ValueError as e:
            self.log.error("Cannot write data to listenstore: %s" % str(e))
            return False

        try:
            self.publisher.publish(unique)
        except NoSubscribersException:
            self.log.error("No subscribers, cannot publish unique listens.")

        return True
Example #24
    def write(self, listen_dicts):
        submit = []
        unique = []

        # Calculate the time range that this set of listens covers
        min_time = 0
        max_time = 0
        user_name = ""
        for listen in listen_dicts:
            t = int(listen['listened_at'])
            if not max_time:
                min_time = max_time = t
                user_name = listen['user_name']
                continue

            if t > max_time:
                max_time = t

            if t < min_time:
                min_time = t

        # querying for artist name here, since a field must be included in the query.
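        # The nine appended zeros convert epoch seconds to InfluxDB's nanosecond timestamps.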
        query = """SELECT time, artist_name
                     FROM "\\"%s\\""
                    WHERE time >= %d000000000
                      AND time <= %d000000000
                """ % (escape(user_name), min_time, max_time)

        while True:
            try:
                results = self.influx.query(query)
                break
            except Exception as e:
                self.log.error("Cannot query influx: %s" % str(e))
                sleep(3)

        # collect all the timestamps for this given time range.
        timestamps = {}
        for result in results.get_points(
                measurement=get_measurement_name(user_name)):
            dt = datetime.strptime(result['time'], "%Y-%m-%dT%H:%M:%SZ")
            timestamps[int(dt.strftime('%s'))] = 1

        duplicate_count = 0
        unique_count = 0
        for listen in listen_dicts:
            # Check to see if the timestamp is already in the DB
            t = int(listen['listened_at'])
            if t in timestamps:
                duplicate_count += 1
                continue

            unique_count += 1
            submit.append(Listen().from_json(listen))
            unique.append(listen)

        while True:
            try:
                t0 = time()
                self.ls.insert(submit)
                self.time += time() - t0
                break

            except ConnectionError as e:
                self.log.error("Cannot write data to listenstore: %s. Sleep." %
                               str(e))
                sleep(ERROR_RETRY_DELAY)
                continue

            except (InfluxDBClientError, InfluxDBServerError, ValueError) as e:
                self.log.error("Cannot write data to listenstore: %s" % str(e))
                if DUMP_JSON_WITH_ERRORS:
                    self.log.error("Was writing the following data: ")
                    self.log.error(json.dumps(submit, indent=4))
                return False

        self.log.error("dups: %d, unique %d" % (duplicate_count, unique_count))
        if not unique_count:
            return True

        while True:
            try:
                self.unique_ch.basic_publish(exchange='unique',
                                             routing_key='',
                                             body=ujson.dumps(unique),
                                             properties=pika.BasicProperties(
                                                 delivery_mode=2, ))
                break
            except pika.exceptions.ConnectionClosed:
                self.connect_to_rabbitmq()

        self.redis.incr(UNIQUE_QUEUE_SIZE_KEY, unique_count)

        return True
Example #25
    def make_groups(self, linkage):
        size = len(linkage) + 1
        #print("size: " + str(size))
        #print("ideal list is " + str(self.ideal_sample_indexes))

        clusters = []
        temp = clusters

        # A dictionary that contains where in the clusters list each index maps to
        indices = {}

        # Use the size for a stopping heuristic
        i = size
        for row in linkage:

            temp = copy.deepcopy(clusters)

            if (row[0] < size and row[1] < size):
                # Make a new cluster!
                new_list = [row[0], row[1]]
                indices[i] = len(clusters)
                clusters.append(new_list)
                
            else:
                # This row is adding a new point to an existing cluster.
                if (row[0] < size):
                    # row[1] is an existing cluster
                    # Add row[0] to cluster row[1]
                    clusters[indices[row[1]]].append(row[0])

                    # Update dictionary
                    indices[i] = indices[row[1]]

                elif (row[1] < size):
                    # row[0] is an existing cluster
                    # Add row[1] to cluster row[0]
                    clusters[indices[row[0]]].append(row[1])

                    # Update dictionary
                    indices[i] = indices[row[0]]

                else:                                 
                    clusters[indices[row[0]]].extend(clusters[indices[row[1]]])
                    clusters.pop(indices[row[1]])
                    indices[i] = indices[row[0]]
                    compare_val = indices[row[1]]
                    for key in indices:
                        if indices[key] >=  compare_val:
                            indices[key] -= 1
            i += 1

            flag = 0

            # New break heuristic: Run until two ideal_sample_indexes are in the same cluster
            for cluster in clusters:
                count = 0
                for j in self.ideal_sample_indexes:
                    find = cluster.count(j)
                    if find > 1:
                        print("WARNING")
                        break
                    count += find
                if count > 1:
                    # print("collision between clusters")
                    flag = 1
                    break

            if flag == 1:
                break
        ######################################################################################## 

        clusters = temp  # roll back to the snapshot taken before the merge that triggered the break

        self.bass_cluster = next(filter(lambda x: self.ideal_bass in x, clusters))
        self.treb_cluster = next(filter(lambda x: self.ideal_treble in x,
                                        clusters))
        self.kick_cluster = next(filter(lambda x: self.ideal_kick in x,
                                        clusters))
        self.snare_cluster = next(filter(lambda x: self.ideal_hihat in x,
                                        clusters))

        listen = Listen()
        # listen.play_instrument(self.instruments[ideal_hihat])

        # print("*************************Kick Row**********************")
        # for sample in kick_row:
        #     listen.play_instrument(self.instruments[int(sample)])

        # print("*************************Snare Row************************")
        # for sample in snare_row:
        #     listen.play_instrument(self.instruments[int(sample)])

        # print("*************************Bass Samples************************")
        # for sample in self.bass_cluster:
        #     print("Sample " + str(sample) + ":")
        #     listen.play_instrument(self.instruments[int(sample)])

        # print("*************************Treble Row************************")
        # for sample in treb_row:
        #     listen.play_instrument(self.instruments[int(sample)])


        #print(bass_row)
        #print(treb_row)

        # Find pairings 
        # for s in bass_row:
        #     for slist in self.collected_song_samples:
        #         for s2 in slist:

        basstreble_parings = []

        for slist in self.collected_song_samples:
            snb = [i for i in slist if i in self.bass_cluster]
            snt = [i for i in slist if i in self.treb_cluster]
            if snb and snt:
                #print(slist)
                for a in snb:
                    for b in snt:
                        #print(str(a) + ": " + str(b))
                        basstreble_parings.append([a, b])

        self.basstreble_parings = basstreble_parings
Example #26
import sys
from listen import Listen

if __name__ == '__main__':
    print('Booting up CDS..')
    cdsListener = Listen()
    cdsListener.main()