Example no. 1
 def get_max_datetime(self, table_name):
     """ Calculates and returns the maximum date up to which data is already present in DB
         Parameter: name of table
     """
     db = get_db()
     query = '''select {} from {}'''.format(self.column_check_date,
                                            table_name)
     cursor = db.cursor()
     try:
         cursor.execute(query)
         result = cursor.fetchall()
         dates = list()
         for item in result:
             dates.append(self.__get_datetime(item[0]))
         db.close()
         return max(dates)
     except Exception as e:
         try:
             db.close()
         except:
             pass
         print "Exception while generating max date :", e
         Logger.log('error',
                    "Exception while generating max date : {}.".format(e))
         return None
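A note on this example: the maximum is computed client-side because column_check_date values are parsed by __get_datetime. If the column's type sorts chronologically, the database can do the work instead; a minimal variant sketch under that assumption (hypothetical, not the author's code):

def get_max_datetime_sql(self, table_name):
    """ Variant that lets the database compute the maximum date """
    db = get_db()
    query = '''select max({}) from {}'''.format(self.column_check_date, table_name)
    try:
        cursor = db.cursor()
        cursor.execute(query)
        max_date = cursor.fetchone()[0]
        db.close()
        return max_date
    except Exception as e:
        db.close()
        Logger.log('error', "Exception while generating max date : {}.".format(e))
        return None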
Example no. 2
 def __init__(self, queue, ip):
     self.log = Logger(DATA_CLIENT_LOG_FILE, D_VERB)
     self.log.info('[MAIN THREAD] Instantiated data_client')
     self.transmit = queue
     self.receiving = False
     self.remote_ip = ip
     self.my_ip = socket.gethostbyname(socket.gethostname())
Example no. 3
 def __init__(self, ip):
     self.log = Logger(MAIN_CLIENT_LOG_FILE, D_VERB)
     self.log.info('[MAIN THREAD] Instantiated client')
     self.receiving = False
     self.define_headers()
     self.targets = {}
     self.transmit = Queue.Queue()
     self.data_client = DataClient(self.transmit, ip)
     self.data_processor = DataProcessor(self.transmit, self.headers,
                                         self.targets)
     self.connect(ip)
Example no. 4
def main(gpu, args):
    rank = args.nr * args.gpus + gpu
    dist.init_process_group(backend='nccl',
                            init_method='env://',
                            world_size=args.world_size,
                            rank=rank)

    with open(args.config) as config_file:
        params = json.load(config_file)
        params['nodes'] = args.nodes
        params['gpus'] = args.gpus
        params['nr'] = args.nr
        params['world_size'] = args.world_size
        params['rank'] = rank

    random.seed(params['seed'])
    np.random.seed(params['seed'])
    torch.manual_seed(params['seed'])

    preproc = Preprocessor(sp_model=params['sp_model'])
    params['vocab_size'] = preproc.vocab_size

    logger = Logger(params['logfile'])

    logger.write('Loading data...')
    train_dataloader, eval_dataloader = prepare_dataset(
        dataset_dir=params['dataset_dir'],
        train_sets=params['train_sets'],
        eval_sets=params['eval_sets'],
        batch_size=params['batch_size'],
        preproc=preproc,
        world_size=args.world_size,
        rank=rank)

    logger.write('Model initialization...')
    model, num_params = make_model(params)
    logger.write(f'Total number of parameters: {num_params}')

    torch.cuda.set_device(gpu)
    model.cuda()

    optimizer = torch.optim.Adadelta(model.parameters(),
                                     lr=params['lr'],
                                     eps=1e-8,
                                     rho=0.95,
                                     weight_decay=params['weight_decay'])

    model, optimizer = amp.initialize(min_loss_scale=1.0,
                                      models=model,
                                      optimizers=optimizer,
                                      opt_level=params['opt_level'])
    model = DDP(model)

    train(data=[train_dataloader, eval_dataloader, preproc],
          model=model,
          optimizer=optimizer,
          logger=logger,
          params=params)
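main(gpu, args) takes the per-node GPU index as its first argument, which matches torch.multiprocessing.spawn's calling convention. A minimal launcher sketch (an assumption; the MASTER_ADDR/MASTER_PORT values are placeholders for the env:// rendezvous):

import os
import torch.multiprocessing as mp

def launch(args):
    # env:// rendezvous reads these variables in every spawned process
    os.environ['MASTER_ADDR'] = '127.0.0.1'  # placeholder: master node address
    os.environ['MASTER_PORT'] = '29500'      # placeholder: a free TCP port
    args.world_size = args.gpus * args.nodes
    # calls main(gpu, args) once per local GPU, gpu = 0..args.gpus-1
    mp.spawn(main, nprocs=args.gpus, args=(args,))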
Example no. 5
async def auth_check(service, websocket):
    """ check rights for work with server """

    # init error logger
    error_logger = Logger('ws_server_errors')

    cookies_string = websocket.request_headers.get("Cookie", None)
    cookies = await cookies_to_dict(cookies_string)
    if not cookies:
        await error_logger.prepare_and_save_record(Logger.ERROR, 'Websocket without cookie', websocket)
        return False, None

    ci_session = cookies.get('ci_session', None)
    if not ci_session:
        await error_logger.prepare_and_save_record(Logger.ERROR, 'Websocket without ci_session cookie', websocket)
        return False, None

    # note: the hardcoded URL and the forced response below look like stub/debug code
    service['url'] = 'http://127.0.0.1:8000/blog/123/vote/'
    service['url'] = ''.join([service['url'], '?session_id=', ci_session])

    response = await http_async_get_json(service['url'])
    response = True  # stub: the real response is discarded and fabricated below
    if response:
        id = randrange(1, 4)
        if id == 1:
            response = json.loads('{{"agency_id": {0}, "user_id": {1}}}'.format(1, 100))
        elif id == 2:
            response = json.loads('{{"agency_id": {0}, "user_id": {1}}}'.format(2, 200))
        else:
            response = json.loads('{{"agency_id": {0}, "user_id": {1}}}'.format(1, 101))
        return True, response
    else:
        await error_logger.prepare_and_save_record(Logger.ERROR, 'Websocket with wrong ci_session cookie', websocket)
        return False, None
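cookies_to_dict is used above but not shown; one plausible implementation (an assumption, based on the standard http.cookies module):

from http.cookies import SimpleCookie

async def cookies_to_dict(cookies_string):
    # parse a raw Cookie header into a plain dict; None/empty input yields None
    if not cookies_string:
        return None
    cookie = SimpleCookie()
    cookie.load(cookies_string)
    return {key: morsel.value for key, morsel in cookie.items()}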
Example no. 6
 def __alter_table_collumn_add(self, db, table_name, new_collumns):
     """ Alters a table by adding new columns where data type will always be varchar(500)
         Parameters: a database connection object
                     name of table
                     list of new columns to be added
     """
     for item in new_collumns:
         print "adding new column '{}' to '{}'".format(item, table_name)
         Logger.log(
             'info',
             "adding new column '{}' to '{}'".format(item, table_name))
         query = '''ALTER TABLE {} ADD {} varchar(500)'''.format(
             table_name, item)
         db.cursor().execute(query)
         print "column added."
         Logger.log('info', "column added.")
Example no. 7
    def __init__(self, headers, transmit, connection_table):
        self.step = D_STEP
        self.timeout = int(D_TIMEOUT / self.step)

        self.log = Logger(DATA_SERVER_LOG_FILE, D_VERB)
        self.run = True

        self.receivers = []
        self.transmit = transmit
        self.connection_table = connection_table

        self.sys_headers = headers['system']
        self.proc_headers = headers['process']

        self.data_thread = threading.Thread(target = self.process_loop, name = 'data managing', args = ())
        self.log.info('Starting DATA THREAD')
        self.data_thread.start()
        self.log.debug('DATA THREAD Started')
Example no. 8
 def __init__(self, ip):
     self.log = Logger(MAIN_CLIENT_LOG_FILE, D_VERB)
     self.log.info('[MAIN THREAD] Instantiated client')
     self.receiving = False
     self.define_headers()
     self.targets = {}
     self.transmit = Queue.Queue()
     self.data_client = DataClient(self.transmit, ip)
     self.data_processor = DataProcessor(self.transmit, self.headers, self.targets)
     self.connect(ip)
Example no. 9
    def __init__(self, headers, data):
        self.log        = Logger(CPU_SERVER_LOG_FILE, D_VERB)
        self.step       = D_STEP
        self.timeout    = int(D_TIMEOUT / self.step)

        # Sync variables
        self.transmit = data
        self.run = True

        # Record var
        self.sys_prev_cpu = {key:0 for key in SYS_CPU_DATA}
        self.time = 0
        self.load = 0
        self.proc = dict()

        self.thr_start = threading.Thread(target = self.record_process, name = 'cpu_thread', args=(), kwargs={})
        self.log.info('[MAIN THREAD] starting CPU Thread')
        self.thr_start.start()
        self.log.debug('[MAIN THREAD] CPU Thread started')
Example no. 10
 def __init__(self, queue, headers, targets):
     self.log = Logger(PROC_CLIENT_LOG_FILE, level=D_VERB)
     # Main thread communication
     self.keep_running = True
     self.transmit = queue
     self.headers = headers
     self.targets = targets
     # print data
     self.printing = False
     self.base_data = None
     self.fig = ()
     self.ax = ()
     # store data
     self.local_store = False
     self.files = {}
     # Launching Thread
     self.thr = threading.Thread(target=self.process,
                                 args=(),
                                 name='process_thread')
     self.start()
Example no. 11
def test_dev():
    message_regex = "Error: Missing option '--rancher-url'."
    logger = Logger(name='test_cli', log_level=LogLevel.DEBUG)
    runner = CliRunner()
    with EnvironContext(
            CI_PROJECT_NAMESPACE='odin',
            CI_PROJECT_NAME='odin-api',
            LOG_LEVEL='TRACE',
            RANCHER_ENV='ODIN_DEV',
            RANCHER_STACK='odin-sandbox',
            # RANCHER_SERVICE='odin-portal',
            IMAGE='registry.gitlab.dev.cu.edu/odin/odin-api:22'):
        result = runner.invoke(
            cli.main, '--stack odin-sandbox '
            '--create-stack '
            '--create-service '
            '--image registry.gitlab.dev.cu.edu/odin/odin-api:22 '
            '--service-links "es-client=elasticsearch/es-client,kafka=kafka/kafka" '
            '--variables "SPRING_PROFILES_ACTIVE=dev|jasypt.encryptor.password=${KEY}" '
            '--labels "io.rancher.container.hostname_override=container_name,app=odin-api,[email protected],commit=b6ee1ef2,traefik.enable=true,traefik.http.routers.feature--2-row-level-security.rule=Host(`https://feature--2-row-level-security.sbx.odin.dev.cu.edu`),traefik.http.routers.feature--2-row-level-security.service=feature--2-row-level-security,traefik.http.services.feature--2-row-level-security.loadbalancer.server.port=8081,traefik.domain=odin.dev.cu.edu,io.rancher.container.pull_image=always"'
        )
        logger.info('OUTPUT:\r\n\r\n%s' % result.output)
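EnvironContext is a project helper, not a standard library; a sketch of what such a context manager typically looks like (an assumption, not the project's actual code):

import os
from contextlib import contextmanager

@contextmanager
def environ_context(**overrides):
    # temporarily set environment variables, restoring the originals on exit
    saved = {key: os.environ.get(key) for key in overrides}
    os.environ.update(overrides)
    try:
        yield
    finally:
        for key, value in saved.items():
            if value is None:
                os.environ.pop(key, None)
            else:
                os.environ[key] = value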
Example no. 12
async def _update_model():
    from helpers import Logger
    from vendor.make_data import main as make_data_main
    from train import train

    logger = Logger()

    logger.info('dump_from_db_to_json')
    await dump_from_db_to_json('science3', './temp/')

    logger.info('making data')
    make_data_main('./temp/news.json', './News_Demo/data/')

    logger.info('training')
    train('HAN_0.json')
Example no. 13
 def __init__(self):
     self.logger = logger.Logger()
     self.indicators = dict()
     for i, d in enumerate(self.datas):
         if (i + 1) == len(self.datas): continue
         next_data = self.datas[i + 1]
         if d._name + '_long' != next_data._name: continue
         self.indicators[d] = dict()
         inds = self.indicators[d]
         inds['close'] = d.close
         inds['fast_moving_average'] = bt.indicators.SMA(
             d.close, period=self.p.fast, plotname=str(self.p.fast) + '-Period SMA')
         inds['slow_moving_average'] = bt.indicators.SMA(
             d.close, period=self.p.slow, plotname=str(self.p.slow) + '-Period SMA')
         inds['crossover'] = bt.indicators.CrossOver(
             inds['fast_moving_average'], inds['slow_moving_average'])
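The indicators dict maps each base feed to its SMA pair and crossover; a sketch of a next method that could consume it (an assumption; the strategy body is not shown in this example):

    # inside the same bt.Strategy subclass:
    def next(self):
        for d, inds in self.indicators.items():
            if not self.getposition(d).size:
                if inds['crossover'] > 0:    # fast SMA crossed above slow SMA
                    self.buy(data=d)
            elif inds['crossover'] < 0:      # fast SMA crossed below slow SMA
                self.close(data=d)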
Example no. 14
async def incoming_call_answered(data, answered_user_websocket):
    """ incoming call was answered by user. Send others about it and synchronize answered user interfaces """
    if 'call_uid' not in data or not data['call_uid']:
        return

    # if the call does not exist
    if data['call_uid'] not in Telephony.incoming_callings:
        return

    # get answered user info
    answered_agency_id, answered_user_id = await Connection.user_info_get_by_ws(
        answered_user_websocket)

    # send info to user who answered
    ws_list = await Connection.user_ws_get(answered_agency_id,
                                           answered_user_id)
    if ws_list:
        await asyncio.wait([
            ws.send(
                json.dumps({
                    "type": "incoming_call_answered",
                    "data": json.dumps('you answered')
                })) for ws in ws_list
        ])

    # send to others that call answered
    recipients = await Telephony.recipients_about_answer_get(
        call_uid=data['call_uid'], answered_user_id=answered_user_id)

    ws_list = await Connection.particular_users_in_agency_ws_get(
        answered_agency_id, recipients)
    if ws_list:
        await asyncio.wait([
            ws.send(
                json.dumps({
                    "type": "incoming_call_answered",
                    "data": "already answered by other user"
                })) for ws in ws_list
        ])

    from helpers import Logger
    listeners_error_logger = Logger('ws_server_errors')

    # unregister
    await Telephony.incoming_call_unregister(data['call_uid'])
Example no. 15
 def __init__(self, queue, headers, targets):
     self.log = Logger(PROC_CLIENT_LOG_FILE, level = D_VERB)
     # Main thread communication
     self.keep_running = True
     self.transmit = queue
     self.headers = headers
     self.targets = targets
     # print data
     self.printing = False
     self.base_data = None
     self.fig = ()
     self.ax = ()
     # store data
     self.local_store = False
     self.files = {}
     # Launching Thread
     self.thr = threading.Thread(target = self.process, args = (), name = 'process_thread')
     self.start()
Example no. 16
async def ws_router(request, ws):
    """ router for websocket responses """
    action = request.get('action', None)
    data = request.get('data', None)

    if not action:
        return

    # init router logger
    requests_ws_logger = Logger('requests_ws')
    await requests_ws_logger.prepare_and_save_record(
        level=Logger.INFO,
        message='action: "{}", data: "{}"'.format(action,
                                                  data if data else '-'),
        ws=ws)

    if action == 'incoming_call_answered':
        await incoming_call_answered(data, ws)
    elif action == 'destroy_session':
        await ws.close()
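The if/elif chain is fine for two actions; as handlers accumulate, a dispatch table keeps the router flat. A sketch (an assumption, not the project's code; it presumes handlers share the (data, ws) signature):

ACTION_HANDLERS = {
    'incoming_call_answered': incoming_call_answered,
}

async def dispatch(action, data, ws):
    handler = ACTION_HANDLERS.get(action)
    if handler:
        await handler(data, ws)
    elif action == 'destroy_session':
        await ws.close()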
Example no. 17
    def insert_db(self, data, table_name, data_updated_till, id_prefix):
        """ Inserts data to a given table
            Parameters: data from the API
                        name of table
                        maximum datetime up to which data is already updated in the table
                        prefix of id of table
        """
        print "inside insert_db method"
        db = get_db()

        # Fetching columns in API
        fieldnames_api = data.next()
        fieldnames_api = [
            item.lower().replace(" : ", "_").replace(" ",
                                                     "_").replace("-", "_")
            for item in fieldnames_api
        ]

        try:
            column_check_date_index = fieldnames_api.index(
                self.column_check_date)
        except ValueError:  # column_check_date not present in the API fieldnames
            print "WARNING !! {} not found in API response, GOING TO INSERT ALL DATA TO DATABASE.".format(
                ' '.join(self.column_check_date.split('_')))
            Logger.log(
                'warning',
                "{} not found in API response, GOING TO INSERT ALL DATA TO DATABASE."
                .format(' '.join(self.column_check_date.split('_'))))
            column_check_date_index = None

        # Fetching columns already present in our DB table
        query = "show columns from {}".format(table_name)
        cursor = db.cursor()
        cursor.execute(query)
        result = cursor.fetchall()
        fieldnames_db = list()
        for item in result[1:]:
            fieldnames_db.append(item[0])

        difference = list(set(fieldnames_api) - set(fieldnames_db))
        if len(difference) > 0:
            print "found new column(s)."
            Logger.log('info', "found new column(s).")
            try:
                self.__alter_table_collumn_add(db, table_name, difference)
            except Exception as e:
                print "Exception during alter table :", e
                Logger.log('error',
                           "Exception during alter table : {}".format(e))
                return None

        # fields structure to build the insert query
        if table_name == "inventory_master_db":
            fields = "%s, " * (len(fieldnames_api) + 2)
        else:
            fields = "%s, " * (len(fieldnames_api) + 1)
        fields = fields[:-2]

        max_id = self.__get_current_max_id(db, id_prefix, table_name)
        max_id = int(max_id)

        # fields to build the query string for building the insert query
        query_str = ''

        for item in fieldnames_api:
            query_str += item + ", "
        query_str = query_str + self.nmk_id_field
        if table_name == "inventory_master_db":
            query_str = query_str + ", last_modified"

        # building the final insert query
        query = '''insert into {} ({}) values ({})'''.format(
            table_name, query_str, fields)

        cursor = db.cursor()

        # Append id in each row of data to be inserted in DB table
        final_data = list()
        for row in data:
            row = [str(item) for item in row]
            if (column_check_date_index is not None) and (data_updated_till
                                                          is not None):
                try:
                    current_row_date_value = row[column_check_date_index]
                    date = self.__get_datetime(current_row_date_value)
                except Exception as e:
                    continue
                if data_updated_till < date:
                    max_id += 1
                    final_data.append(
                        self.__append_id(id_prefix, row, max_id, table_name))
            else:
                max_id += 1
                final_data.append(
                    self.__append_id(id_prefix, row, max_id, table_name))

        if (column_check_date_index is not None) and (data_updated_till
                                                      is not None):
            print "Number of new row(s) found : {}".format(len(final_data))
            Logger.log(
                'info',
                "Number of new row(s) found : {}".format(len(final_data)))

        # If we have values to be inserted in table then we insert all data at once
        if len(final_data):
            try:
                print "inserting data into table '{}'".format(table_name)
                Logger.log('info',
                           "inserting data into table '{}'".format(table_name))
                row_count = cursor.executemany(query, final_data)
                db.commit()
                print "Number of row(s) inserted : {}".format(row_count)
                Logger.log('info',
                           "Number of row(s) inserted : {}".format(row_count))
            except Exception as e:
                print "Database insertion exception :", e
                Logger.log('error',
                           "Database insertion exception : {}".format(e))

        db.close()
Example no. 18
class CPUWatcher(object):
    # Could easily add irq, frag, pgfault, and vmem from bench/cpuload.
    # Which are worth watching?
    def __init__(self, headers, data):
        self.log        = Logger(CPU_SERVER_LOG_FILE, D_VERB)
        self.step       = D_STEP
        self.timeout    = int(D_TIMEOUT / self.step)

        # Sync variables
        self.transmit = data
        self.run = True

        # Record var
        self.sys_prev_cpu = {key:0 for key in SYS_CPU_DATA}
        self.time = 0
        self.load = 0
        self.proc = dict()

        self.thr_start = threading.Thread(target = self.record_process, name = 'cpu_thread', args=(), kwargs={})
        self.log.info('[MAIN THREAD] starting CPU Thread')
        self.thr_start.start()
        self.log.debug('[MAIN THREAD] CPU Thread started')

    def quit(self):
        #self.stop()
        self.run = False

    def start(self, target):
        """
        Starts recording process async.
        """
        ## self.directory = os.path.join(NAO_DATA_DIR, time.ctime())
        ## os.makedirs(self.directory)
        if target == 'system':
            self.proc['system'] = None
            self.log.info('[MAIN THREAD] Start watching system')
            return True
        else:
            pid = self._get_pid(target)
            if pid:
                self.proc[target] = {
                        'pid':pid,
                        'prev_cpu':{key:0 for key in PROC_CPU_DATA},
                    }
                return True
            else:
                self.log.error("Non valid process {}. Skipping this process".format(target))
                return False

    def stop(self, target):
        """
        Stops the recording process
        """
        if target in self.proc:
            del self.proc[target]
            self.log.info('[MAIN THREAD] Has stopped {}'.format(target))
            return_val = True
        else:
            self.log.error('[MAIN THREAD] Has been asked to stop {} while not recording'.format(target))
            return_val = False
        time.sleep(self.step)
        return return_val

    # init helpers
    def _get_pid(self, process):
        print 'ps aux | grep {}'.format(process)
        ps_result = os.popen('ps aux | grep {}'.format(process)).readlines()
        print ps_result
        tmp = []
        for res in ps_result:
            if '--proc' in res:
                tmp.append(res)
            elif 'grep' in res:  # elif: avoid queueing the same line twice for removal
                tmp.append(res)
        for proc in tmp:
            ps_result.remove(proc)
        if len(ps_result) != 1:
            pid = 0
        else:
            pid = ps_result[0].split()[1]
        return pid

    # record methods
    # refactor files should be handled by server data thread
    # only use transmission queue here

    def _record(self, tmp):
        self.transmit.put(tmp)
        self.log.debug('[CPU Thread] Has put to queue {}'.format(tmp))

    def record_process(self):
        self.log.debug('[CPU THREAD] In thread')
        count = 0                                                   # record loop var init
        while self.run:                # Timeout + stop() message
            if count < self.timeout:
                self.log.debug('[CPU THREAD] Processing')
                tmp = {}
                tme = self._get_time()                                  # sys time is used for several measures
                keys = self.proc.keys()
                for key in keys:
                    if key == 'system':
                        tmp[key] = self.get_sys_data(tme)               
                    else:
                        tmp[key] = self.get_proc_data(tme, key)
                if tmp:
                    self._record(tmp)
                count += self.step
                time.sleep(self.step)
            else:
                self.log.warn('[CPU THREAD] Timeout happened, should we change code to stop process?')
                time.sleep(1)
        print '[CPU THREAD] End of thread record'

    # record helpers

    def get_sys_data(self, tme):
        tmp_sys = self.get_sys_cpu_stat(tme)                        # SYS_MEM_DATA
        tmp_sys.update(self.get_sys_mem_stat())                     # SYS_CPU_DATA
        tmp_sys.update(self.get_load_avg())                         # LOAD_AVG
        tmp_sys['time'] = tme                                       # SYS_CPU_OTHER
        tmp_sys['load'] = 100 * (1 - tmp_sys['idle_time'])          # add frag, pgfault, any other?
        tmp_sys['timestamp'] = time.time()
        return tmp_sys

    def get_proc_data(self,tme, key):
        tmp_proc = self.get_proc_cpu_stat(key, tme)                 # PROC_CPU_DATA
        tmp_proc.update(self.get_proc_mem_stat(key))                # PROC_MEM_DATA
        tmp_proc['timestamp'] = time.time()
        return tmp_proc

    def _get_time(self):
        with open('/proc/stat') as cpu_stat:
            cpu_line = cpu_stat.readline()
        tmp = cpu_line.split()
        tmp = tmp[1:]
        tmp = map(float, tmp)
        now_time = sum(tmp)
        res = now_time - self.time
        self.time = now_time
        return res

    def get_load_avg(self):
        with open('/proc/loadavg', 'r') as load_file:
            line = load_file.readline()
        res = line.split()
        return {LOAD_AVG[i]:float(res[i]) for i in range(3)}

    def get_sys_cpu_stat(self, tme):
        res = dict()
        with open('/proc/stat') as cpu_stat:
            cpu_line = cpu_stat.readline()
        tmp = cpu_line.split()
        tmp = tmp[1:]
        tmp = map(float, tmp)
        tmp_sys_cpu = {
            'usr_time':tmp[0],
            'nice_time':tmp[1],
            'sys_time':tmp[2],
            'idle_time':tmp[3],
            'io_time':tmp[4],
            'irq_time':tmp[5],
            'softirq_time':tmp[6]
        }
        try:
            for key in SYS_CPU_DATA:
                res[key] = (tmp_sys_cpu[key] - self.sys_prev_cpu[key])  / tme
                self.sys_prev_cpu[key] = tmp_sys_cpu[key]
        except KeyError as e:
            print "key error {}".format(e.message)
        return res

    def get_proc_cpu_stat(self, process, sys_time):
        pid = self.proc[process]['pid']
        res = dict()
        with open("/proc/"+str(pid)+"/stat") as cpuinfo:
            line = cpuinfo.read()
        tmp = line.split()
        tmp = tmp[11:17]
        tmp = map(int, tmp)
        tmp_proc_cpu = {
            "utime":tmp[2],
            "cutime":tmp[4],
            "stime":tmp[3],
            "cstime":tmp[5],
            "majflt":tmp[0],
            "majcfault":tmp[1],
            #"time": sum(tmp[2:])
        }
        try:
            for key in PROC_CPU_DATA:
                if key != 'time':
                    res[key] = tmp_proc_cpu[key] - self.proc[process]['prev_cpu'][key] # divide by proc time?
                    self.proc[process]['prev_cpu'][key] = tmp_proc_cpu[key]
            res['time'] = (res['utime'] + res['stime']) / sys_time

        except KeyError as e:
            print "key error {}".format(e.message)
        return res

    def get_sys_mem_stat(self):
        """
            Returns a dict containing infos from /proc/meminfo
                - MemAvailable
                - MemFree
                - Buffers
                - Cached
        """
        res = dict()
        with open('/proc/meminfo') as meminfo:
            mem_list = meminfo.readlines()
        # Optimize if it takes too long
        for line in mem_list:
            tmp = line.split()
            tmp[0] = tmp[0].replace(':', '')
            if tmp[0] in SYS_MEM_DATA:
                res[tmp[0]] = int(tmp[1])
        if len(res) != len(SYS_MEM_DATA):
            raise Exception("Error: wrong parsing of /proc/meminfo")
        return res

    def get_proc_mem_stat(self, process):
        pid = self.proc[process]['pid']
        # Optimize if it takes too long
        res = dict()
        with open("/proc/"+str(pid)+"/status") as meminfo:
            mem_list = meminfo.readlines()
        for line in mem_list:
            tmp = line.split()
            tmp[0] = tmp[0].replace(':', '')
            if tmp[0] in PROC_MEM_DATA:
                res[tmp[0]] = tmp[1]
        return res
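A usage sketch for CPUWatcher (assumptions: the conf constants are in scope and a Queue.Queue carries samples to a consumer, as in the client examples above):

import Queue  # Python 2 naming, matching this codebase

data_queue = Queue.Queue()
watcher = CPUWatcher({'system': SYS_CPU_DATA, 'process': PROC_CPU_DATA}, data_queue)
watcher.start('system')    # begin sampling whole-system stats
sample = data_queue.get()  # record_process puts one dict per step
watcher.quit()             # ends the record loop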
Example no. 19
from pathlib import Path

from helpers import pickle_object, unpickle_object, ABSTRACT_DIR, TSV_DIR, PICKLE_DIR, Logger
from preprocessors import LSAPreprocessor

import pandas as pd
import numpy as np

from sklearn.model_selection import RepeatedStratifiedKFold, RepeatedKFold, StratifiedKFold, GridSearchCV, train_test_split
from sklearn.decomposition import TruncatedSVD

N_SPLITS = 2
logger = Logger()


def test_svd_components():
    logger.plog('Started Initial Feature Selection.')

    # preproc = LSAPreprocessor(ABSTRACT_DIR / 'abstracts.tsv', label_col='source', max_tfidf_features=10000)
    preproc = LSAPreprocessor(ABSTRACT_DIR / 'abstracts.tsv',
                              label_col='source',
                              max_tfidf_features=10000)

    logger.plog("Corpus Loaded")
    X = preproc.training_corpus['abstract']
    y = preproc.training_corpus['label']
    preproc.get_lsa_pipeline()

    # component_set =  (10, 75, 100, 500, 1000, 2500, 5000)
    # component_set = (50, 100, 250, 500, 1000, 2500, 5000)
    component_set = (7500, 9000)
Example no. 20
from collections import defaultdict
from typing import List, Optional

from fastapi import APIRouter, File, UploadFile, Form
from pydantic import BaseModel
import sqlalchemy

from analyze.strategy import StrategyPredictor
from database import database
from declaration import strategies, topics
from helpers import Logger
logger = Logger('predict')


class Counter:
    def __init__(self):
        self.dict = defaultdict(int)

    def __call__(self, item):
        self.dict[item] += 1

    def __getitem__(self, index):
        return self.dict[index]

    def get_max_items(self, n=5):
        return sorted([(key, value) for key, value in self.dict.items()],
                      key=lambda item: item[1],
                      reverse=True)[:n]


class PredictInput(BaseModel):
Example no. 21
class DataProcessor(object):
    def __init__(self, queue, headers, targets):
        self.log = Logger(PROC_CLIENT_LOG_FILE, level = D_VERB)
        # Main thread communication
        self.keep_running = True
        self.transmit = queue
        self.headers = headers
        self.targets = targets
        # print data
        self.printing = False
        self.base_data = None
        self.fig = ()
        self.ax = ()
        # store data
        self.local_store = False
        self.files = {}
        # Launching Thread
        self.thr = threading.Thread(target = self.process, args = (), name = 'process_thread')
        self.start()

    ###
    ###     Process Thread
    ###
    def start(self):
        self.log.info('[MAIN THREAD] Starting process thread')
        self.thr.start()
        self.log.debug('[MAIN THREAD] Process thread started')

    def stop(self):
        self.keep_running = False
        self.log.info('[MAIN THREAD] Asked processing thread end')

    def process(self):
        while self.keep_running:
            self.log.debug('[PROCESS THREAD] Getting data')
            try:
                data = self.transmit.get(timeout = 1)
                data = json.loads(data)
                self.log.debug('[PROCESS THREAD] Got data {}'.format(data))
                if self.printing:
                    to_print = self.build_print_data(data)
                    self.log.debug('[PROCESS THREAD] Printing')
                    multi_print_dic(self.base_data, self.print_data)
                    self.log.debug('[PROCESS THREAD] Printed')
                if self.local_store:
                    # self.build_store_data?
                    self.process_store(data)
                    #### To write: self.process_local
            except Empty:
                self.log.debug('[PROCESS THREAD] No data')
        self.log.info('[PROCESS THREAD] End of thread')

    ###
    ###         Print utilities
    ###
    def start_print(self):
        self.log.info('[MAIN THREAD] Start printing')
        self.build_print_headers()
        self.log.debug('[MAIN THREAD] Built headers')
        self.print_data = multi_init_print(self.base_data)
        self.log.debug('[MAIN THREAD] Graphics initiated')
        self.printing = True

    def stop_print(self):
        self.log.info('[MAIN THREAD] Stop printing')
        self.printing = False
        clear_print()

    def build_print_headers(self):
        ret = {}
        for types in self.targets:
            for instance in self.targets[types]:
                ret[instance]={}
                for data_field in self.headers[types]:
                    ret[instance][data_field] = []
        self.base_data = ret
        self.log.debug('[DATA THREAD] Header: {}'.format(self.base_data))

    def build_print_data(self, dico):
        for target in dico:
            for data_field in dico[target]:
                self.base_data[target][data_field].append(dico[target][data_field])
    ####
    ####        Storage utilities
    ####
    def process_store(self, dico):
        for target in self.files:
            try:
                if target == 'system':
                    res = [dico[target][data_field] for data_field in self.headers['system']]
                else:
                    res = [dico[target][data_field] for data_field in self.headers['process']]
            except AttributeError:
                res = range(len(dico))
            print >> self.files[target], list_to_csv(res)
            self.log.debug('[PROCESS THREAD] Stored {}'.format(list_to_csv(res)))

    def start_store(self, dirname = None):
        # Make record dir
        if not dirname:
            dirname = str(time.time())  # os.path.join needs a string, not a float
        directory = os.path.join(DATA_DIR, dirname)
        self.log.info('[MAIN THREAD] Starting local storage in {}'.format(directory))
        if os.path.isdir(directory):
            shutil.rmtree(directory)
        os.makedirs(directory)
        self.log.debug('[MAIN THREAD] Made local record dir')

        # Open files
        for types in self.targets:
            for instance in self.targets[types]:
                filename = os.path.join(directory, instance)
                self.files[instance] = open(filename, 'w')
                self.log.debug('[MAIN THREAD] Opened {}'.format(filename))

        # Write headers
        for key in self.files:
            if key == 'system':
                print >> self.files[key], list_to_csv(self.headers['system'])
                self.log.debug('[MAIN THREAD] wrote {} in file {}'.format(list_to_csv(self.headers['system']), key))
            else:
                print >> self.files[key], list_to_csv(self.headers['process'])
                self.log.debug('[MAIN THREAD] wrote {} in file {}'.format(list_to_csv(self.headers['process']), key))

        # Ask start storing and return store file paths
        self.local_store = True
        self.log.debug('[MAIN THREAD] End start local')
        return [os.path.join(directory, instance) for instance in self.files]

    def stop_store(self):
        self.log.info('[MAIN THREAD] Stopping storage')
        self.local_store = False
        for key in self.files:
            self.files[key].close()
            self.log.debug('closed {}'.format(key))


    def reset_processor(self):
        self.files = {}
Example no. 22
import numpy as np
import os, time, csv
import matplotlib
from matplotlib import pyplot as plt
from helpers import Logger
from conf import *

N_COL = 2
PRINT_AXIS = range(15)
log = Logger(PRINT_CLIENT_LOG_FILE, D_VERB)

###
###     Non Dynamic prints
###

def print_file(file_path):
    with open(file_path) as csvfile:
        reader = csv.DictReader(csvfile)
        if os.path.basename(file_path) == 'system':
            headers = SYS_CPU_OTHER + LOAD_AVG + SYS_CPU_DATA + SYS_MEM_DATA
        else:
            headers = PROC_CPU_DATA + PROC_MEM_DATA
        print_data = {}
        for elem in headers:
            print_data[elem] = []
        print headers
        for row in reader:
            for elem in headers:
                print_data[elem].append(row[elem])
        for elem in headers:
            print 'print_data[{}]={}'.format(elem,print_data[elem])
Example no. 23
def main():
    logger = Logger.Logger()
    logger.Warning("Starting PSRTrade")
    Backtest().test()
Example no. 24
owlnet.cuda()
# print(owlnet)


# In[5]:


owlnet.ladder.suggested_in_size(20)


# In[6]:


owlnet_trainer = Trainer()
owlnet_logger = Logger({'loss':(list(),list())})

# LadderNet training parameters
owlnet_train_args = dict()
owlnet_train_args['lr'] = .01
owlnet_train_args['batch_size'] = 99
owlnet_train_args['epochs'] = 1
owlnet_train_args['report'] = 5
owlnet_train_args['crop'] = 81
owlnet_train_args['clip'] = 1
owlnet_train_args['decay'] = 0
# LadderNet loss parameters
owlnet_loss_args = {}

train_ds.set_crop(owlnet_train_args['crop'])
Example no. 25
async def http_router(path, request_headers):
    """ router for http responses """
    # if it is ws request
    if path == '/':
        return

    # init error logger
    error_logger = Logger('ws_server_errors')
    # init router logger
    requests_http_logger = Logger('requests_http')
    await requests_http_logger.prepare_and_save_record(
        level=Logger.INFO, message='path: "{}"'.format(path))

    if path.startswith('/health'):
        """ 
        get status of ws server
        sample http://127.0.0.1:6789/health
        """
        return HTTPStatus.OK, [], b'{"status": "ok"}\n'
    elif path.startswith('/telephony_incoming'):
        """ 
        when an incoming call arrives
        sample http://127.0.0.1:6789/telephony_incoming?token=74a17b7f-7e19-41f4-aee4-95b52d899c0d&method=begin&call_uid=123-123-123&recipients={"agency_id":1,"users_ids":[101,102,100]}
        sample http://127.0.0.1:6789/telephony_incoming?token=74a17b7f-7e19-41f4-aee4-95b52d899c0d&method=cancel&call_uid=123-123-123&recipients={"agency_id":1}
        """
        parsed = urlparse(path)
        if parsed.query and \
                'recipients' in parse_qs(parsed.query) and \
                'token' in parse_qs(parsed.query) and \
                'method' in parse_qs(parsed.query) and \
                'call_uid' in parse_qs(parsed.query) and \
                await http_request_token_check(parse_qs(parsed.query)['token'][0]) and \
                await telephony_router(method=parse_qs(parsed.query)['method'][0],
                                       call_uid=parse_qs(parsed.query)['call_uid'][0],
                                       recipients=json.loads(parse_qs(parsed.query)['recipients'][0])):
            return HTTPStatus.OK, [], b'{"status": "success"}\n'
        else:
            await error_logger.prepare_and_save_record(
                level=Logger.ERROR,
                message='HTTP router error "{}"'.format(path))
            return HTTPStatus.OK, [], b'{"status": "error"}\n'
    elif path.startswith('/active_connections_get'):
        """
        get active connections from ws server memory
        sample http://127.0.0.1:6789/active_connections_get?token=74a17b7f-7e19-41f4-aee4-95b52d899c0d
        """
        parsed = urlparse(path)
        if parsed.query and \
                'token' in parse_qs(parsed.query) and \
                await http_request_token_check(parse_qs(parsed.query)['token'][0]):
            return HTTPStatus.OK, [], bytes(
                '{{"status": "success", "result": {0}}}\n'.format(
                    await Connection.connections_to_string()), 'utf-8')
        else:
            await error_logger.prepare_and_save_record(
                level=Logger.ERROR,
                message='HTTP router error "{}"'.format(path))
            return HTTPStatus.OK, [], b'{"status": "error"}\n'
    elif path.startswith('/incoming_callings_get'):
        """
        get active incoming callings from ws server memory
        sample http://127.0.0.1:6789/incoming_callings_get?token=74a17b7f-7e19-41f4-aee4-95b52d899c0d
        """
        parsed = urlparse(path)
        if parsed.query and \
                'token' in parse_qs(parsed.query) and \
                await http_request_token_check(parse_qs(parsed.query)['token'][0]):
            return HTTPStatus.OK, [], bytes(
                '{{"status": "success", "result": {0}}}\n'.format(
                    await Telephony.incoming_callings_to_string()), 'utf-8')
        else:
            await error_logger.prepare_and_save_record(
                level=Logger.ERROR,
                message='HTTP router error "{}"'.format(path))
            return HTTPStatus.OK, [], b'{"status": "error"}\n'
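Each branch above re-runs parse_qs once per key; parsing once per request is behavior-preserving. A sketch over the /active_connections_get branch:

    elif path.startswith('/active_connections_get'):
        parsed = urlparse(path)
        params = parse_qs(parsed.query)  # parse once, reuse below
        token = params.get('token', [None])[0]
        if token and await http_request_token_check(token):
            return HTTPStatus.OK, [], bytes(
                '{{"status": "success", "result": {0}}}\n'.format(
                    await Connection.connections_to_string()), 'utf-8')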
Example no. 26
 def __init__(self):
     self.logger = Logger.Logger()
     # self.dataModel = Data.SecuritiesData(TDAmeritrade)
     self.dataModel = Data.SecuritiesData(AlphaVantage)
Example no. 27
                                process_request=routers.http_router)

ioloop = asyncio.get_event_loop()

# create tasks for async running
ioloop.create_task(
    online_users_get(configs.HOMECRM_SERVICES['online_users_get']))
ioloop.create_task(selections_get(configs.HOMECRM_SERVICES['selections_get']))
ioloop.create_task(
    notifications_get(configs.HOMECRM_SERVICES['notifications_get']))
ioloop.create_task(balance_check(configs.HOMECRM_SERVICES['balance_check']))

# inner tasks
ioloop.create_task(old_incoming_calls_remove())

ioloop.run_until_complete(Logger.loggers_init())
ioloop.run_until_complete(start_server)
ioloop.run_forever()

# server side
# todo limitation for one user's connections
# todo wss https://websockets.readthedocs.io/en/stable/intro.html#secure-server-example
# todo pip install aiohttp[speedups] and add it to requirements.txt

# deploy side
# todo proxy ws requests via nginx

# crm side
# todo make methods for listeners
# todo make methods for auth
# todo install supervisord
Example no. 28
def my_main():
    # if __name__ == '__main__':
    Logger()

    # Loop through all tables provided in config
    for item in get_tables():
        try:
            time.sleep(5)

            print "Search id :", item['search_id']
            Logger.log('info', "Search id : {}".format(item['search_id']))
            searchId = item['search_id']
            table_name = item['table']

            obj = Data(searchId, table_name)
            print "fetching data from API."
            Logger.log('info', "fetching data from API.")
            data = obj.read_data()
            print data
            if data is None:
                print "No response from API"
                Logger.log('warning', "No response from API.")
            else:
                print "data fetched from API."
                Logger.log('info', "data fetched from API.")

            if item.get('truncate'):
                obj.truncate_table(table_name)
                print "table '{}' truncated.".format(table_name)
                Logger.log('info', "table '{}' truncated.".format(table_name))

            print "Calculating date till which data is updated."
            Logger.log('info', "Calculating date till which data is updated.")
            data_updated_till = obj.get_max_datetime(table_name)
            if data_updated_till:
                print "data updated till : {}".format(data_updated_till)
                Logger.log('info',
                           "data updated till : {}".format(data_updated_till))
            else:
                print "WARNING !! Unable to find max date, GOING TO INSERT ALL DATA TO DATABASE."
                Logger.log(
                    'warning',
                    "Unable to find max date, GOING TO INSERT ALL DATA TO DATABASE."
                )

            obj.insert_db(data, table_name, data_updated_till,
                          item['id_prefix'])
            print "Done ...!!"

        except Exception as e:
            print "Exception :", e
            Logger.log('error', "Exception : {}".format(e))
            traceback.print_exc()

        print "\n\n"
    return 'Success'
Example no. 29
 def __init__(self):
     self.logger = Logger.Logger()
Example no. 30
class DataClient(object):
    def __init__(self, queue, ip):
        self.log = Logger(DATA_CLIENT_LOG_FILE, D_VERB)
        self.log.info('[MAIN THREAD] Instantiated data_client')
        self.transmit = queue
        self.receiving = False
        self.remote_ip = ip
        self.my_ip = socket.gethostbyname(socket.gethostname())

    def start(self):
        self.soc_data = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.soc_data.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.log.debug('[MAIN THREAD] Connecting to server data channel')
        self.soc_data.connect((self.remote_ip,SOC_PORT_DATA))
        self.log.info('[MAIN THREAD] Data Channel Connected')
        self.data_receive = threading.Thread(target = self.receive, args = ())
        self.log.info('[MAIN THREAD] Starting DATA THREAD')
        self.receiving = True
        self.data_receive.start()
        self.log.debug('[MAIN THREAD] DATA THREAD started')

    def stop(self):
        self.log.debug("[MAIN THREAD] Stop command sent")
        self.receiving = False
        self.log.info("[MAIN THREAD] Asked DATA THREAD to stop receiving")

    def receive(self):
        #FIXME_1 : recv_data is blocking. If nothing is sent and asked to stop, it will block program exit
        while self.receiving:
            self.log.debug('[DATA THREAD] waiting for data from server')
            data = recv_data(self.soc_data)
            self.log.debug('[DATA THREAD] Received data {}\n'.format(data))
            if data:
                self.transmit.put(data)
                self.log.debug('[DATA THREAD] Transmitted data ')
            else:   # Not sure this should exist
                self.log.info('[DATA THREAD] Empty data received. Closing socket ')
                self.soc_data.close()
                break
        if not self.receiving:
            self.log.info('[DATA THREAD] self.receiving is False. Closing socket ')
            self.soc_data.close()
        self.log.info('[DATA THREAD] Exiting thread \n')
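One way to address FIXME_1 (a sketch; it assumes recv_data lets socket.timeout propagate): give the data socket a timeout so the loop re-checks self.receiving instead of blocking indefinitely:

    # inside DataClient:
    def receive(self):
        self.soc_data.settimeout(1.0)  # assumption: recv_data propagates socket.timeout
        while self.receiving:
            try:
                data = recv_data(self.soc_data)
            except socket.timeout:
                continue  # nothing arrived; loop back and re-check self.receiving
            if data:
                self.transmit.put(data)
            else:
                self.log.info('[DATA THREAD] Empty data received. Closing socket ')
                break
        self.soc_data.close()
        self.log.info('[DATA THREAD] Exiting thread \n')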
Example no. 31
class DataProcessor(object):
    def __init__(self, queue, headers, targets):
        self.log = Logger(PROC_CLIENT_LOG_FILE, level=D_VERB)
        # Main thread communication
        self.keep_running = True
        self.transmit = queue
        self.headers = headers
        self.targets = targets
        # print data
        self.printing = False
        self.base_data = None
        self.fig = ()
        self.ax = ()
        # store data
        self.local_store = False
        self.files = {}
        # Launching Thread
        self.thr = threading.Thread(target=self.process,
                                    args=(),
                                    name='process_thread')
        self.start()

    ###
    ###     Process Thread
    ###
    def start(self):
        self.log.info('[MAIN THREAD] Starting process thread')
        self.thr.start()
        self.log.debug('[MAIN THREAD] Process thread started')

    def stop(self):
        self.keep_running = False
        self.log.info('[MAIN THREAD] Asked processing thread end')

    def process(self):
        while self.keep_running:
            self.log.debug('[PROCESS THREAD] Getting data')
            try:
                data = self.transmit.get(timeout=1)
                data = json.loads(data)
                self.log.debug('[PROCESS THREAD] Got data {}'.format(data))
                if self.printing:
                    to_print = self.build_print_data(data)
                    self.log.debug('[PROCESS THREAD] Printing')
                    multi_print_dic(self.base_data, self.print_data)
                    self.log.debug('[PROCESS THREAD] Printed')
                if self.local_store:
                    # self.build_store_data?
                    self.process_store(data)
                    #### To write: self.process_local
            except Empty:
                self.log.debug('[PROCESS THREAD] No data')
        self.log.info('[PROCESS THREAD] End of thread')

    ###
    ###         Print utilities
    ###
    def start_print(self):
        self.log.info('[MAIN THREAD] Start printing')
        self.build_print_headers()
        self.log.debug('[MAIN THREAD] Built headers')
        self.print_data = multi_init_print(self.base_data)
        self.log.debug('[MAIN THREAD] Graphics initiated')
        self.printing = True

    def stop_print(self):
        self.log.info('[MAIN THREAD] Stop printing')
        self.printing = False
        clear_print()

    def build_print_headers(self):
        ret = {}
        for types in self.targets:
            for instance in self.targets[types]:
                ret[instance] = {}
                for data_field in self.headers[types]:
                    ret[instance][data_field] = []
        self.base_data = ret
        self.log.debug('[DATA THREAD] Header: {}'.format(self.base_data))

    def build_print_data(self, dico):
        for target in dico:
            for data_field in dico[target]:
                self.base_data[target][data_field].append(
                    dico[target][data_field])

    ####
    ####        Storage utilities
    ####
    def process_store(self, dico):
        for target in self.files:
            try:
                if target == 'system':
                    res = [
                        dico[target][data_field]
                        for data_field in self.headers['system']
                    ]
                else:
                    res = [
                        dico[target][data_field]
                        for data_field in self.headers['process']
                    ]
            except AttributeError:
                res = range(len(dico))
            print >> self.files[target], list_to_csv(res)
            self.log.debug('[PROCESS THREAD] Stored {}'.format(
                list_to_csv(res)))

    def start_store(self, dirname=None):
        # Make record dir
        if not dirname:
            dirname = str(time.time())  # os.path.join needs a string, not a float
        directory = os.path.join(DATA_DIR, dirname)
        self.log.info(
            '[MAIN THREAD] Starting local storage in {}'.format(directory))
        if os.path.isdir(directory):
            shutil.rmtree(directory)
        os.makedirs(directory)
        self.log.debug('[MAIN THREAD] Made local record dir')

        # Open files
        for types in self.targets:
            for instance in self.targets[types]:
                filename = os.path.join(directory, instance)
                self.files[instance] = open(filename, 'w')
                self.log.debug('[MAIN THREAD] Opened {}'.format(filename))

        # Write headers
        for key in self.files:
            if key == 'system':
                print >> self.files[key], list_to_csv(self.headers['system'])
                self.log.debug('[MAIN THREAD] wrote {} in file {}'.format(
                    list_to_csv(self.headers['system']), key))
            else:
                print >> self.files[key], list_to_csv(self.headers['process'])
                self.log.debug('[MAIN THREAD] wrote {} in file {}'.format(
                    list_to_csv(self.headers['process']), key))

        # Ask start storing and return store file paths
        self.local_store = True
        self.log.debug('[MAIN THREAD] End start local')
        return [os.path.join(directory, instance) for instance in self.files]

    def stop_store(self):
        self.log.info('[MAIN THREAD] Stopping storage')
        self.local_store = False
        for key in self.files:
            self.files[key].close()
            self.log.debug('closed {}'.format(key))

    def reset_processor(self):
        self.files = {}
Example no. 32
class LightClient(object):
    def __init__(self, ip):
        self.log = Logger(MAIN_CLIENT_LOG_FILE, D_VERB)
        self.log.info('[MAIN THREAD] Instantiated client')
        self.receiving = False
        self.define_headers()
        self.targets = {}
        self.transmit = Queue.Queue()
        self.data_client = DataClient(self.transmit, ip)
        self.data_processor = DataProcessor(self.transmit, self.headers,
                                            self.targets)
        self.connect(ip)

    def connect(self, ip):
        self.soc_ctrl = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.soc_ctrl.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        my_ip = socket.gethostbyname('')
        self.log.debug('[MAIN THREAD] connecting...')
        self.soc_ctrl.connect((ip, SOC_PORT_CTRL))
        self.log.info('[MAIN THREAD] Client connected to server')

    def disconnect(self):
        ### data processor should not be here
        self.data_processor.stop()
        self.soc_ctrl.close()

    def define_headers(self):
        head = {}
        head['process'] = PROC_CPU_DATA + PROC_MEM_DATA + TIMESTAMPS
        head[
            'system'] = SYS_CPU_OTHER + LOAD_AVG + SYS_CPU_DATA + SYS_MEM_DATA + TIMESTAMPS
        self.headers = head

    def add_target(self, target, name):
        if target in self.targets:
            self.targets[target].append(name)
        else:
            self.targets[target] = [name]

    def remove_target(self, target, name):
        if target in self.targets:
            if name in self.targets[target]:
                self.targets[target].remove(name)
                self.log.info('[MAIN THREAD] Removed {} named {}'.format(
                    target, name))
            else:
                self.log.error(
                    '[MAIN THREAD] Asked to remove {} named {} while not recorded'
                    .format(target, name))
        else:
            self.log.error(
                '[MAIN THREAD] Asked to remove {} named {} while not recorded'.
                format(target, name))

    def start_record(self, target, name):
        self.log.debug('[MAIN THREAD] Asking server to start recording')
        msg = MSG_SEP.join([START_RECORD, target, name])
        answer = send_data(self.soc_ctrl, msg)
        self.log.info('[MAIN THREAD] Server asked to start recording')
        if answer == SYNC:
            self.add_target(target, name)
            self.log.info('[MAIN THREAD] Added {} named {}'.format(
                target, name))
        else:
            self.log.warn(
                '[MAIN THREAD] Could not add {} named {} because of server answer'
                .format(target, name))

    def stop_record(self, target, name):
        self.log.debug('[MAIN THREAD] Asking server to stop recording')
        msg = MSG_SEP.join([STOP_RECORD, target, name])
        answer = send_data(self.soc_ctrl, msg)
        self.log.info(
            '[MAIN THREAD] Server asked to stop recording {}'.format(name))
        if answer == SYNC:
            self.remove_target(target, name)
        else:
            self.log.warn(
                '[MAIN THREAD] Could not remove {} named {} because of server answer'
                .format(target, name))

    def start_receive(self):
        if not self.receiving:
            self.receiving = True
            self.log.debug('[MAIN THREAD] Asking server to start sending')
            status = send_data(self.soc_ctrl, START_SEND)
            self.log.info('[MAIN THREAD] Server asked to start sending')
            if status == FAIL:
                self.log.error(
                    '[MAIN THREAD] Client tried to receive but server denied it'
                )
            else:
                print status
                self.data_client.start()
                self.log.info('[MAIN THREAD] Client is receiving')
            self.log.debug("[MAIN THREAD] DATA THREAD started")
        else:
            self.log.warn(
                "[MAIN THREAD] Asked to start receiving while already receiving"
            )

    def stop_receive(self):
        if self.receiving:
            self.log.debug(
                '[MAIN THREAD] Closing data channel. Exiting data client thread'
            )
            self.data_client.stop()
            self.log.info("[MAIN THREAD] Asked server to stop receiving")
            self.receiving = False
            send_data(self.soc_ctrl, STOP_SEND)
        else:
            self.log.warn(
                "[MAIN THREAD] Asked to stop receiving while already receiving"
            )

    def start_store(self, dirname='easy_client'):
        return self.data_processor.start_store(dirname)

    def stop_store(self):
        self.data_processor.stop_store()

    def start_print(self):
        self.data_processor.start_print()

    def stop_print(self):
        self.printing = self.data_processor.stop_print()

    def stop_process(self):
        self.stop_print()
        self.stop_store()
        self.data_processor.stop()
        self.stop_receive()
        self.soc_ctrl.close()

    def stop_all(self):
        self.stop_process()
        send_data(self.soc_ctrl, STOP_ALL)
Example no. 33
class LightClient(object):
    def __init__(self, ip):
        self.log = Logger(MAIN_CLIENT_LOG_FILE, D_VERB)
        self.log.info('[MAIN THREAD] Instantiated client')
        self.receiving = False
        self.define_headers()
        self.targets = {}
        self.transmit = Queue.Queue()
        self.data_client = DataClient(self.transmit, ip)
        self.data_processor = DataProcessor(self.transmit, self.headers, self.targets)
        self.connect(ip)

    def connect(self, ip):
        self.soc_ctrl = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.soc_ctrl.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        my_ip = socket.gethostbyname('')
        self.log.debug('[MAIN THREAD] connecting...')
        self.soc_ctrl.connect((ip,SOC_PORT_CTRL))
        self.log.info('[MAIN THREAD] Client connected to server')

    def disconnect(self):
        ### data processor should not be here
        self.data_processor.stop()
        self.soc_ctrl.close()

    def define_headers(self):
        head = {}
        head['process'] = PROC_CPU_DATA + PROC_MEM_DATA + TIMESTAMPS
        head['system']  = SYS_CPU_OTHER + LOAD_AVG + SYS_CPU_DATA + SYS_MEM_DATA + TIMESTAMPS
        self.headers = head

    def add_target(self, target, name):
        if target in self.targets:
            self.targets[target].append(name)
        else:
            self.targets[target] = [name]

    def remove_target(self, target, name):
        if target in self.targets:
            if name in self.targets[target]:
                self.targets[target].remove(name)
                self.log.info('[MAIN THREAD] Removed {} named {}'.format(target, name))
            else:
                self.log.error('[MAIN THREAD] Asked to remove {} named {} while not recorded'.format(target, name))
        else:
            self.log.error('[MAIN THREAD] Asked to remove {} named {} while not recorded'.format(target, name))

    def start_record(self, target, name):
        self.log.debug('[MAIN THREAD] Asking server to start recording')
        msg = MSG_SEP.join([START_RECORD, target, name])
        answer = send_data(self.soc_ctrl, msg)
        self.log.info('[MAIN THREAD] Server asked to start recording')
        if answer == SYNC:
            self.add_target(target, name)
            self.log.info('[MAIN THREAD] Added {} named {}'.format(target, name))
        else:
            self.log.warn('[MAIN THREAD] Could not add {} named {} because of server answer'.format(target, name))

    def stop_record(self, target, name):
        self.log.debug('[MAIN THREAD] Asking server to stop recording')
        msg = MSG_SEP.join([STOP_RECORD, target, name])
        answer = send_data(self.soc_ctrl, msg)
        self.log.info('[MAIN THREAD] Server asked to stop recording {}'.format(name))
        if answer == SYNC:
            self.remove_target(target, name)
        else:
            self.log.warn('[MAIN THREAD] Could not remove {} named {} because of server answer'.format(target, name))

    def start_receive(self):
        if not self.receiving:
            self.receiving = True
            self.log.debug('[MAIN THREAD] Asking server to start sending')
            status = send_data(self.soc_ctrl, START_SEND)
            self.log.info('[MAIN THREAD] Server asked to start sending')
            if status == FAIL:
                self.log.error('[MAIN THREAD] Client tried to receive but server denied it')
            else:
                self.log.debug('[MAIN THREAD] Server answered {}'.format(status))
                self.data_client.start()
                self.log.info('[MAIN THREAD] Client is receiving')
                self.log.debug("[MAIN THREAD] DATA THREAD started")
        else:
            self.log.warn("[MAIN THREAD] Asked to start receiving while already receiving")

    def stop_receive(self):
        if self.receiving:
            self.log.debug('[MAIN THREAD] Closing data channel. Exiting data client thread')
            self.data_client.stop()
            self.log.info("[MAIN THREAD] Asked server to stop receiving")
            self.receiving = False
            send_data(self.soc_ctrl,STOP_SEND)
        else:
            self.log.warn("[MAIN THREAD] Asked to stop receiving while already receiving")

    def start_store(self, dirname='easy_client'):
        return self.data_processor.start_store(dirname)

    def stop_store(self):
        self.data_processor.stop_store()

    def start_print(self):
        self.data_processor.start_print()

    def stop_print(self):
        self.printing = self.data_processor.stop_print()

    def stop_process(self):
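        # teardown order: stop the consumers first, then the processor thread,
        # then the data channel, and finally the control socket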
        self.stop_print()
        self.stop_store()
        self.data_processor.stop()
        self.stop_receive()
        self.soc_ctrl.close()


    def stop_all(self):
        self.stop_process()
        send_data(self.soc_ctrl, STOP_ALL)
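
A minimal usage sketch of this client, assuming a compatible server is running (SERVER_IP, the target name and the sleep interval are illustrative, not from the source):

# Hypothetical end-to-end session with LightClient
import time

client = LightClient(SERVER_IP)                # connect the control channel
client.start_record('process', 'my_process')   # ask the server to record a target
client.start_receive()                         # open the data channel
client.start_store('easy_client')              # persist incoming samples
time.sleep(10)                                 # collect for a while
client.stop_all()                              # stop recording, sending and processing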
Esempio n. 34
0
# Make a new dict with GPIs as keys and GpiStreams as values
gpi_stream_dict = {}


def create_gpi_table(gpi_stream_dict):
    # number the streams from 1; avoid shadowing the builtin `id`
    for num, (gpi, stream_id) in enumerate(cf.gpi2stream.items(), start=1):
        gpi_stream_dict[gpi] = GpiStream(num, stream_id, gpi)


create_gpi_table(gpi_stream_dict)

# Test the logger
logger = Logger(socketio)


#logger.start()
def test_message():
    while True:
        logger.log_message('Yo mama so fat')
        time.sleep(20)


t = th.Thread(target=test_message)
t.start()


@app.route('/')
@app.route('/command_center')
Esempio n. 35
0
class DataManager(object):
    def __init__(self, headers, transmit, connection_table):
        self.step = D_STEP
        self.timeout = int(D_TIMEOUT / self.step)

        self.log = Logger(DATA_SERVER_LOG_FILE, D_VERB)
        self.run = True

        self.receivers = []
        self.transmit = transmit
        self.connection_table = connection_table

        self.sys_headers = headers['system']
        self.proc_headers = headers['process']

        self.data_thread = threading.Thread(target=self.process_loop, name='data managing', args=())
        self.log.info('Starting DATA THREAD')
        self.data_thread.start()
        self.log.debug('DATA THREAD Started')

    def process_loop(self):
        ###
        ### Add timeout so that we keep control when waiting for data
        ###
        while self.run:
            self.log.debug('[DATA THREAD] Waiting for queue')
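            # blocking get(): quit() only takes effect once another item arrives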
            data = self.transmit.get()
            self.log.debug('[DATA THREAD] Got {}'.format(data))
            for conn in self.receivers:  # avoid shadowing the socket module
                self.process_send(conn, data)

    def quit(self):
        self.run = False

    def start_send(self):
        self.init_thread = threading.Thread(target=self.init_connection, name='init_send_connection', args=())
        self.log.info('[MAIN THREAD] Starting INIT THREAD')
        self.init_thread.start()
        self.log.debug('[MAIN THREAD] INIT THREAD Started')

    def init_connection(self):
        soc_data = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        soc_data.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        soc_data.bind(('', SOC_PORT_DATA))
        soc_data.listen(1)
        self.log.info('[INIT THREAD] Waiting for a connection')
        connection, client_address = soc_data.accept()
        self.log.info('[INIT THREAD] Connection accepted from {}'.format(client_address))
        self.receivers.append(connection)

    def process_send(self, connection, data):
        targets = self.get_client_targets(connection)
        self.log.debug('[DATA THREAD] targets are {}'.format(targets))
        sub_data = self.get_sub_dict(data, targets)
        self.log.debug('[DATA THREAD] sub_data is  {}'.format(sub_data))
        mess = json.dumps(sub_data)
        self.log.debug('[DATA THREAD] Sending data {}'.format(mess))
        status = send_data(connection, mess)
        if status == '':
            self.receivers.remove(connection)
            self.log.info('[DATA THREAD] connection removed')
        self.log.debug('[DATA THREAD] Data sent')

    def get_sub_dict(self, data, targets):
        return {key: data[key] for key in targets if key in data}

    def get_client_targets(self, connection):
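        # NOTE: clients are matched by peer IP only, so two clients behind the
        # same address would collide; the last matching entry wins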
        client_address = connection.getpeername()[0]
        targets = None
        for client in self.connection_table:
            self.log.debug('[DATA THREAD] Checking with potential address {} '.format(client.getpeername()))
            if client.getpeername()[0] == client_address:
                targets = self.connection_table[client]
        if targets is not None:
            return targets
        else:
            self.log.error('[DATA THREAD] Could not find client {} in connection table'.format(client_address))
            return []

    def stop_send(self):
        self.log.info('[MAIN THREAD] Stopping DATA THREAD')
        tmp = self.receivers
        self.receivers = []
        for elem in tmp:
            elem.close()
            self.log.debug('[MAIN THREAD] Closed data socket')

    def is_sending(self):
        return len(self.receivers) > 0
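
A rough sketch of how this manager might be wired on the server side (the queue producer and the connection_table contents are simplified assumptions; the header constants are reused from the client snippet above):

# Hypothetical server-side wiring for DataManager
import Queue

transmit = Queue.Queue()
connection_table = {}    # control socket -> list of target names, filled elsewhere
headers = {'system': SYS_CPU_DATA + TIMESTAMPS,
           'process': PROC_CPU_DATA + TIMESTAMPS}

manager = DataManager(headers, transmit, connection_table)
manager.start_send()                       # INIT THREAD waits for a data connection
transmit.put({'my_process': [0.5, 1.2]})   # fanned out to every connected receiver
manager.quit()                             # process_loop exits after the next item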
Esempio n. 36
0
import os
import time

import cv2
import numpy as np

from neigh_search import LSHIndex as LSH, L2ExactIndex as L2
from feat_extract import FeatureExtractor

from helpers import Logger

labels = []
vectors = []
imgPath = "./data_gen/output/"

l = Logger()

images = [f for f in os.listdir(imgPath) if os.path.isfile(os.path.join(imgPath, f))]
total = len(images)
time_taken = 0

for i, image_name in enumerate(images):
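    # ANSI escape: clear the terminal and move the cursor to the top-left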
    l.log("\033[2J\033[0;0H")
    l.log(f"Image {i+1} of {total}")

    filename = os.path.join(imgPath, image_name)

    l.log(f"reading {filename}")

    img = cv2.imread(filename, 0)
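    # imread flag 0 gave a single-channel grayscale image; add an explicit
    # channel axis so the shape becomes (height, width, 1)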
    img = np.reshape(img, (*img.shape, 1))
Esempio n. 37
0
 def __init__(self, provider):
     self.logger = Logger.Logger()
     self.provider = provider.Provider()