Example #1
    def edit_vehicle_state(self, node_input):
        ''' Changes the vehicle state with an incoming
            node input.

            node_input : any NodeInput
                The node input received from the interpreter; it provides
                the changes to apply to the vehicle state.

        '''
        input_type = type(node_input)

        if (input_type == DistanceNodeInput):
            node = self._get_node(self.distance_nodes, node_input)
            if (node is not None):
                node.update_variable_list(node_input.node_var_name,
                                          node_input.__dict__)
        elif (input_type == SteeringNodeInput):
            if (self.steering is not None):
                self.steering.update_variable_list(node_input.node_var_name,
                                                   node_input.__dict__)
        elif (input_type == LocalizationNodeInput):
            # TODO: add LocalizationNode for now less relevant
            pass
        elif (input_type == EngineNodeInput):
            node = self._get_node(self.engine_nodes, node_input)
            if (node is not None):
                node.update_variable_list(node_input.node_var_name,
                                          node_input.__dict__)
        elif (input_type == TemperatureNodeInput):
            # TODO: add TemperatureNode for now less relevant
            pass

        self.compressed_nodes = self._compress_nodes(self.distance_nodes,
                                                     self.temperature_nodes,
                                                     self.engine_nodes,
                                                     self.steering)
        # Closes previous shared memory block.
        if (self.shared_list is not None):
            self.shared_list.shm.close()
            self.shared_list.shm.unlink()
        try:
            # Tries to add new memory block with information
            # containing the nodes.
            self.shared_list = shared_memory.ShareableList(
                [self.compressed_nodes], name='shm_cargodash')
        except FileExistsError:
            # Old shared memory block is still open and needs to
            # be closed.
            temp_shm = shared_memory.ShareableList(name='shm_cargodash')
            temp_shm.shm.close()
            temp_shm.shm.unlink()
            # Now able to add wanted shared memory block.
            self.shared_list = shared_memory.ShareableList(
                [self.compressed_nodes], name='shm_cargodash')
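
The close/unlink/re-create dance above can be factored into a small helper. A minimal sketch, standard library only; the helper name and the example payload are illustrative, not part of the original code:

from multiprocessing import shared_memory

def recreate_shareable_list(values, name):
    # Create a named ShareableList, replacing any stale block left under that name.
    try:
        return shared_memory.ShareableList(values, name=name)
    except FileExistsError:
        # A block with this name still exists: attach to it, release it, then retry.
        stale = shared_memory.ShareableList(name=name)
        stale.shm.close()
        stale.shm.unlink()
        return shared_memory.ShareableList(values, name=name)

# Usage sketch:
# shared_list = recreate_shareable_list([b'compressed payload'], name='shm_cargodash')
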
Example #2
 def __init__(self, addr=None, manager=None, mutex=None, format_list=None,
              size=0, ratio=2):
     if mutex is None:
         self.mutex = RLock()
     else:
         self.mutex = mutex
     if manager is None:
         self._manager = DummyManager()
     elif isinstance(manager, (SharedMemoryManager, DummyManager)):
         self._manager = manager
     else:
         self._manager = SharedMemoryManager(manager)
     capacity = int(size*ratio)
     if capacity == 0:
         capacity = ratio
     with self.mutex:
         if addr is None:
             if format_list is None:
                 raise ValueError("Either addr or format_list must be provided")
             self._shl = self._manager.ShareableList(format_list)
             self._shl_addr = self._shl.shm.name
             self._shm = self._manager.SharedMemory(capacity)
             self._shm_addr = self._shm.name
             self._shl[0] = self._shm_addr
             self._shl[1] = int(size)
             self._shl[2] = int(capacity)
         else:
             self._shl_addr = addr
             self._shl = shared_memory.ShareableList(name=addr)
             self._shm_addr = self._shl[0]
             self._shm = shared_memory.SharedMemory(name=self._shm_addr)
Example #3
    async def _extract_meta_data(
        self,
        allow_list: dict,
        cache_manager: CacheManager,
        shared_memory_name: str,
    ) -> dict:
        data = {}
        tasks = []

        shared_status = shared_memory.ShareableList(name=shared_memory_name)
        shared_status[0] = 0

        for metadata_extractor in self.metadata_extractors:
            if allow_list[metadata_extractor.key]:
                if (not cache_manager.bypass
                        and self.is_feature_whitelisted_for_cache(
                            metadata_extractor)
                        and cache_manager.is_host_predefined()
                        and cache_manager.is_enough_cached_data_present(
                            metadata_extractor.key)):
                    extracted_metadata: dict = (
                        cache_manager.get_predefined_metadata(
                            metadata_extractor.key))
                    data.update(extracted_metadata)
                    shared_status[0] += 1
                else:
                    tasks.append(metadata_extractor.start())

        extracted_metadata: tuple[dict] = await asyncio.gather(*tasks)
        shared_status[0] += len(tasks)
        data = {**data, **dict(ChainMap(*extracted_metadata))}
        return data
def _shared_memory_init(**kwargs):
    if 'name' not in kwargs:
        return
    if 'data_dict' not in kwargs:
        return
    shm = shared_memory.ShareableList([json.dumps(kwargs['data_dict'])], name=kwargs['name'])
    return shm
def _shared_memory_set_dict(**kwargs):
    if 'name' not in kwargs:
        return
    if 'data_dict' not in kwargs:
        return
    shm = shared_memory.ShareableList(name=kwargs['name'])
    shm[0] = json.dumps(kwargs['data_dict'])
Example #6
 def __init__(self, name):
     # Set starting position of pointer
     self.question = []
     self.x = self.table.get_width() / 2 - self.pointer.get_width() / 2
     self.y = self.table.get_height() / 2 - 3 / 8 * self.pointer.get_height() / 2
     self.phase = 0
     # Initialize shared memory with interface
     self.shm = shared_memory.ShareableList(name=name)
Example #7
    def __init_shared_memory(self, state):
        shm_list = []
        shm_list.append(0)  # amount of images (12 per unit)
        shm_list.append(1)  # state
        for i in range(2, self.__shm_size):
            shm_list.append('null')

        if state == 0:
            self.shm_id = shared_memory.ShareableList(shm_list)
Example #8
def get_data(smqn, sman, smdn, smln, smsn, l, port_name):

    #Apparently this is the correct way to do shared memory
    smq = shared_memory.ShareableList(name=smqn)
    sma = shared_memory.ShareableList(name=sman)
    smd = shared_memory.ShareableList(name=smdn)
    sml = shared_memory.ShareableList(name=smln)
    sms = shared_memory.ShareableList(name=smsn)

    lidar = RPLidar(None, port_name)
    lis = lidar.iter_scans

    #Speeding up lock calls by binding the methods of the passed-in lock
    la = l.acquire
    lr = l.release
    while True:  #Code Retry
        try:
            for scan in lis():
                la()  #Locking
                if sms[0]: break  #Graceful Shutdown Receiver
                sml[0] = int(len(scan))
                for x in range(0, sml[0]):
                    n = scan[x]
                    smq[x] = n[0]
                    sma[x] = n[1]
                    smd[x] = n[2]
                lr()  #Unlocking
            if sms[0]: break  #Also leave the retry loop; the lock is released below
        except RPLidarException as e:
            print('RPLidarException')
            print(e)
            break
        except ValueError as e:
            print('Failure Due to Access Bug')
            print(e)
            break
        except KeyboardInterrupt as e:
            pass

    #End of Daemon
    if l.locked(): lr()  #allow code to run
    lidar.stop()
    lidar.set_pwm(0)
    lidar.disconnect()
    print('SCAN STOPPED!')
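
A minimal consumer-side sketch for the three parallel lists this producer fills (the list names are reused from the snippet; the function itself is illustrative and assumes the same Lock object is shared with the producer):

from multiprocessing import shared_memory

def read_scan(smqn, sman, smdn, smln, l):
    # Attach to the shared lists by name and copy one consistent snapshot.
    smq = shared_memory.ShareableList(name=smqn)
    sma = shared_memory.ShareableList(name=sman)
    smd = shared_memory.ShareableList(name=smdn)
    sml = shared_memory.ShareableList(name=smln)
    with l:  # hold the lock so the producer cannot overwrite mid-read
        n = sml[0]
        scan = [(smq[i], sma[i], smd[i]) for i in range(n)]
    return scan
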
Example #9
 def ShareableList(self, sequence):
     """Returns a new ShareableList instance populated with the values
     from the input sequence, to be tracked by the manager."""
     with self._Client(self._address, authkey=self._authkey) as conn:
         sl = shared_memory.ShareableList(sequence)
         try:
             dispatch(conn, None, 'track_segment', (sl.shm.name, ))
         except BaseException as e:
             sl.shm.unlink()
             raise e
     return sl
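
The standard-library SharedMemoryManager exposes the same method; a short usage sketch of the tracked-lifetime behaviour it provides:

from multiprocessing.managers import SharedMemoryManager

with SharedMemoryManager() as smm:
    sl = smm.ShareableList([1, 2, 3])  # segment is tracked by the manager process
    sl[0] = 99
# on exit the manager shuts down and unlinks every segment it tracked, including sl
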
Example #10
    def start_test(self):
        self.threadTimer = AnimationTimer(0.5)
        self.shm = shared_memory.ShareableList([0], name='Progress')
        self.threadWork = Testing_function(self.data, self.neuralconfig,
                                           self.shm.shm.name)
        self.threadWork.done.connect(self.threadTimer.requestInterruption)
        self.threadWork.done.connect(self.load_graph)
        self.threadTimer.time_passes.connect(self.update_progress)

        self.threadWork.start()
        self.threadTimer.start()
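
A minimal sketch of the polling side that update_progress presumably relies on (the block name 'Progress' comes from the snippet; the function is illustrative):

from multiprocessing import shared_memory

def read_progress():
    # Attach to the 'Progress' block created in start_test() and read the counter.
    progress = shared_memory.ShareableList(name='Progress')
    done = progress[0]
    progress.shm.close()  # detach without unlinking; the creating side owns the block
    return done
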
Example #11
 def _compute_models(self, values):
     data = list()
     shm = shared_memory.ShareableList(name=self.shm_name)
     for value in values:
         model = self._create_new_model(value)
         data.append(
             (value,
              self.neuralconfig.test_model(model,
                                           self.neuralconfig.testdata)))
         shm[0] += 1
     return data
 def __init__(self, fm_process_que, td_que, shm_name, shm_size):
     self.__set_font.config(family='courier new', size=15)
     threading.Thread.__init__(self)
     self.fm_process_queue = fm_process_que
     self.td_queue = td_que
     self.pym = PYM.LOG(True)
     self.shm_id = shared_memory.ShareableList(name=shm_name)
     self.__interval = 1 
     self.__sys_file = None
     self.__cur_target_index = 0
     self.__this_round_end_index = 0
     self.pym.PY_LOG(False, 'D', self.__log_name, 'init finished')
Example #13
def uncompress_logging_information():
    ''' Uncompresses the shared logging information.

        Returns the logs as JSON.
    '''
    try:
        shared_logs = shared_memory.ShareableList(name='shm_buff_data')
    except FileNotFoundError:
        return {'message': 'Cannot fetch buffered logger'}
    all_logs = zl.decompress(shared_logs[0]).decode('UTF-8')
    all_logs = '"'.join(all_logs.split("'"))
    logs_as_json = json.loads(all_logs)
    return logs_as_json
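
A minimal sketch of the writer side this reader implies, assuming the logs are a dict serialized with str() (hence the single-quote fix-up above) and zlib-compressed; only the block name 'shm_buff_data' comes from the snippet:

import zlib as zl
from multiprocessing import shared_memory

def compress_logging_information(logs: dict):
    # Serialize, compress, and publish the logs under the shared block name.
    payload = zl.compress(str(logs).encode('UTF-8'))
    return shared_memory.ShareableList([payload], name='shm_buff_data')
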
Example #14
def uncompress_nodes_information():
    ''' Uncompresses the shared node information.

        Returns the nodes as JSON.
    '''
    try:
        shared_dict = shared_memory.ShareableList(name='shm_cargodash')
    except FileNotFoundError:
        return uncompress_nodes_information()
    all_nodes = zl.decompress(shared_dict[0]).decode('UTF-8')
    all_nodes = '"'.join(all_nodes.split("'")).replace(
        'False', 'false').replace('True', 'true')
    nodes_as_json = json.loads(all_nodes)
    return nodes_as_json
Example #15
    def __init__(self, name: str, data: dict, lock):
        """
        Standard constructor.

        Builds a shared memory object or fetches it if it already exists.

        :param name: Name of the memory object
        :param data: Dictionary of values to store
        :param lock: Named lock instance
        """
        self._name = name
        self._data = data
        self._lock = lock

        # Create a mapping dictionary which remembers positions of keys for faster access
        self._lookup = {k: i for i, k in enumerate(data)}

        # Create a shared memory object to store the data or fetch the existing one
        try:
            self._shm = _shm.ShareableList(tuple(v for v in data.values()), name=name)
            _Log.info(f"Successfully created shared memory \"{name}\" with a total of {len(data)} keys")
        except FileExistsError:
            self._shm = _shm.ShareableList(name=name)
def _shared_memory_del(**kwargs):
    if 'name' not in kwargs:
        return
    shm = shared_memory.ShareableList(**kwargs)
    shm.shm.close()
    shm.shm.unlink()
import signal
import json
import logging

from flask import Flask, request
from multiprocessing import Process, shared_memory

from mqtt2timescale.postgres_connector import TimescaleConnector
from mqtt2timescale.mqtt_app import setup_mqtt
from utils.environment import Mqtt2TimescaleEnvironment

logging.basicConfig(
    format='%(asctime)s [%(levelname)s] \t %(message)s',
    level=logging.DEBUG,
    datefmt='%Y-%m-%d %H:%M:%S')

kill_switch = shared_memory.ShareableList([False])  # Allocate shared memory for the kill switch


def health_event_loop(args):
    app = Flask(__name__)

    @app.route('/api/health', methods=['GET'])  # health check endpoint (GET only)
    def health():
        logging.info(request.get_data())
        return json.dumps({'UP': "OK"}), 200, {'ContentType': 'application/json'}

    port = args.host.split(":")[1]
    app.run(host='0.0.0.0', port=port)


def sigterm_handler(_signo, _stack_frame):
    # Assumed body: flip the shared kill switch so the worker processes can exit.
    kill_switch[0] = True


# Reconstructed (assumed) opening of the beacon dict; the "nearest_beacon" entry is
# inferred from the access further down.
data = {
    "nearest_beacon": {
        "mac": "33",
        "rssi": -51,
        "tx": 50.5,
    },
    "beacons_list": [
        {
            "mac":"33",
            "rssi": -51,
            "tx": 50.5,
        },
        {
            "mac":"33",
            "rssi": -51,
            "tx": 50.5,
        },
    ]
}

data_str = json.dumps(data)
shm_a = shared_memory.ShareableList([data_str], name="beacons")

shm_b = shared_memory.ShareableList(name="beacons")
data_dict = json.loads(shm_b[0])
data_dict

data_dict['nearest_beacon']['mac'] = "66:33"
shm_b[0] = json.dumps(data_dict)
shm_a[0]

shm_b.shm.close()   
shm_a.shm.close()
shm_a.shm.unlink()
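
One caveat with this round-trip pattern: each ShareableList slot keeps the storage size it was allocated with, so writing back a JSON string longer than the original can raise ValueError. A common workaround (illustrative, not from the original) is to pad the initial value:

import json
from multiprocessing import shared_memory

payload = json.dumps({"nearest_beacon": None, "beacons_list": []}).ljust(4096)
shm_padded = shared_memory.ShareableList([payload], name="beacons_padded")
# Later writers may store any JSON up to 4096 characters:
shm_padded[0] = json.dumps({"nearest_beacon": {"mac": "66:33"}, "beacons_list": []})
shm_padded.shm.close()
shm_padded.shm.unlink()
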


Example #19
    def start(self, message: dict) -> dict:

        self._logger.debug(
            f"Start metadata_manager at {time.perf_counter() - global_start} since start"
        )

        shared_status = shared_memory.ShareableList(
            name=message[MESSAGE_SHARED_MEMORY_NAME])
        url = message[MESSAGE_URL]
        if len(url) > 1024:
            url = url[0:1024]
        shared_status[1] = url

        website_manager = WebsiteManager.get_instance()
        self._logger.debug(
            f"WebsiteManager initialized at {time.perf_counter() - global_start} since start"
        )
        website_manager.load_website_data(message=message)

        self._logger.debug(
            f"WebsiteManager loaded at {time.perf_counter() - global_start} since start"
        )
        cache_manager = CacheManager.get_instance()
        cache_manager.update_to_current_domain(
            website_manager.website_data.domain,
            bypass=message[MESSAGE_BYPASS_CACHE],
        )

        now = time.perf_counter()
        self._logger.debug(
            f"starting_extraction at {now - global_start} since start")
        starting_extraction = get_utc_now()
        if website_manager.website_data.html == "":
            exception = "Empty html. Potentially, splash failed."
            extracted_meta_data = {MESSAGE_EXCEPTION: exception}
        else:
            try:
                extracted_meta_data = asyncio.run(
                    self._extract_meta_data(
                        allow_list=message[MESSAGE_ALLOW_LIST],
                        cache_manager=cache_manager,
                        shared_memory_name=message[MESSAGE_SHARED_MEMORY_NAME],
                    ))
                self.cache_data(
                    extracted_meta_data,
                    cache_manager,
                    allow_list=message[MESSAGE_ALLOW_LIST],
                )
            except ConnectionError as e:
                exception = f"Connection error extracting metadata: '{e.args}'"
                self._logger.exception(
                    exception,
                    exc_info=True,
                )
                extracted_meta_data = {MESSAGE_EXCEPTION: exception}
            except Exception as e:
                exception = (
                    f"Unknown exception from extracting metadata: '{e.args}'. "
                    f"{''.join(traceback.format_exception(None, e, e.__traceback__))}"
                )
                self._logger.exception(
                    exception,
                    exc_info=True,
                )
                extracted_meta_data = {MESSAGE_EXCEPTION: exception}

        self._logger.debug(
            f"extracted_meta_data at {time.perf_counter() - global_start} since start"
        )
        extracted_meta_data.update({
            "time_for_extraction": get_utc_now() - starting_extraction,
            **website_manager.get_website_data_to_log(),
        })

        website_manager.reset()
        cache_manager.reset()
        shared_status[1] = ""

        self._logger.debug(
            f"website_manager.reset() at {time.perf_counter() - global_start} since start"
        )
        return extracted_meta_data
Example #20
from lib.settings import NUMBER_OF_EXTRACTORS, VERSION
from lib.timing import get_utc_now
from lib.tools import is_production_environment

app = FastAPI(title=METADATA_EXTRACTOR, version=VERSION)
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["POST", "GET", "OPTIONS", "PUT", "DELETE"],
    allow_headers=["*"],
)
# noinspection PyTypeHints
app.communicator: QueueCommunicator

shared_status = shared_memory.ShareableList([0, " " * 1024])
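# Slot 1 is pre-allocated with 1024 spaces so the URL written later in start()
# (truncated there to 1024 characters) fits: ShareableList slots cannot grow
# after creation.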
db.base.create_metadata(db.base.database_engine)


def _convert_dict_to_output_model(
    meta: dict, debug: bool = False
) -> ExtractorTags:
    extractor_tags = ExtractorTags()
    for key in ExtractorTags.__fields__.keys():
        if key in meta.keys() and VALUES in meta[key]:

            if not debug or TIME_REQUIRED not in meta[key].keys():
                meta[key][TIME_REQUIRED] = None

            extractor_tags.__setattr__(
                key,
Example #21
                print(len(sma))
                print(len(smq))
                print(len(sml))
                print('Exception Handled')
            #Retry Frame Once
            process_data(smq, sma, smd, sml, calls + 1, dp)
    else:
        if debug: print('Dropped Frame')
        pass


#Now the Actual Code

#Run Before Connect
l = Lock()
smq = shared_memory.ShareableList([0.0] * 400)  #Quality
sma = shared_memory.ShareableList([0.0] * 400)  #Angle
smd = shared_memory.ShareableList([0.0] * 400)  #Distance
sml = shared_memory.ShareableList([0])  #Current Array Length
sms = shared_memory.ShareableList([False])  #Stop Signal
#Apparently you have to pass the names, not the objects, of the shared memory
p1 = multiprocessing.Process(target=get_data,
                             args=(smq.shm.name, sma.shm.name, smd.shm.name,
                                   sml.shm.name, sms.shm.name, l, port_name),
                             daemon=True)
p1.start()  #Start Daemon

#Continuity Loop
while True:

    #Do Not Change Anything Below, All of this is Security
Example #22

def resource_path(relative_path):
    """ Get absolute path to resource, works for dev and for PyInstaller """
    try:
        # PyInstaller creates a temp folder and stores path in _MEIPASS
        base_path = sys._MEIPASS
    except Exception:
        base_path = os.path.abspath(".")
    return os.path.join(base_path, relative_path)


logging.basicConfig(filename=resource_path('logs.log'),
                    level=logging.DEBUG)  #Create our logging file

shared_mem = shared_memory.ShareableList(
    [False, False, 0.0, 1, 1, 0, None, 0.2])
# 0 - deface_executing = False # is a video currently being processed?
# 1 - deface_finish = False # has the blurring just finished?
# 2 - progress = 0.0 # progress bar, in %
# 3 - files_to_blur = 1 # number of files to blur
# 4 - file_being_blur = 1 # index of the file currently being blurred
# 5 - rotate = 0
# 6 - frame_rate = None
# 7 - threshold = 0.2


class App:
    def __init__(self, folder_path_destination=None):

        DARK_BG = '#111'
        LIGHT_DARK_BG = '#333'
Example #23
def get_data(smin, smon, smfn, smsn, l, port_name):

    #Apparently this is the correct way to do shared memory
    smi = shared_memory.SharedMemory(name=smin)  #Image (write only)
    smo = shared_memory.SharedMemory(name=smon)  #Options (read only)
    smf = shared_memory.ShareableList(name=smfn)  #Frame rate (read only)
    sms = shared_memory.ShareableList(name=smsn)  #Image size (write only)

    #Speedups
    la = l.acquire
    lr = l.release
    cvt = cv2.cvtColor
    bgr = cv2.COLOR_BGR2RGB
    ifa = Image.fromarray
    bio = io.BytesIO
    tc = time.perf_counter
    ts = time.sleep
    smib = smi.buf
    smob = smo.buf
    try:
        while True:  #Retry loop

            #Init Loop
            while True:
                la()
                runn = smob[0]
                qual = smob[1]
                stop = smob[2]
                fram = smf[0]
                if stop: break
                if not runn:
                    lr()
                    ts(0.25)  #Check options every 250ms
                else:
                    break

            #Capture loop
            if not stop:
                cap = cv2.VideoCapture(port_name)
                lr()
                cr = cap.read
            while not stop:
                a = tc()  #Start time for frame limiting

                #Image grabbing goes here
                ret, frame = cr()
                frame = cvt(frame, bgr)
                img = ifa(frame)

                #Converting raw image into compressed JPEG bytes
                with bio() as output:
                    img.save(output, format="JPEG", quality=qual)
                    la()
                    content = output.getvalue()
                    y = len(content)
                    sms[0] = y  #Set Image Size
                    smib[:y] = content
                runn = smob[0]
                qual = smob[1]
                stop = smob[2]
                fram = smf[0]
                lr()
                if (not runn) or stop: break

                #Frame rate limiter
                b = tc()  #End time for frame limiting
                c = b - a
                if fram - c > 0.0:
                    ts(fram - c)  #Delay remaining seconds

            #Breaker bar
            cap.release()
            if stop: break

    except ValueError as e:
        print('Failure Due to Access Bug')
        print(e)
    except KeyboardInterrupt as e:
        pass

    #End of daemon
    if l.locked(): lr()  #Allow code to run
    try:
        cap.release()  #Attempt to close camera if error
    except:
        pass
    smi.close()
    smo.close()
    smf.shm.close()
    sms.shm.close()
    print('CAMERA STOPPED!')
def _shared_memory_get_dict(**kwargs):
    if 'name' not in kwargs:
        return
    shm = shared_memory.ShareableList(**kwargs)
    return json.loads(shm[0])
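
A short usage sketch tying the _shared_memory_* helpers above together (they are assumed to live in one module; the block name 'example_state' is illustrative). Note that each call attaches a fresh ShareableList without closing it, so long-running callers may want to close the handles they receive:

shm = _shared_memory_init(name='example_state', data_dict={'count': 0})
_shared_memory_set_dict(name='example_state', data_dict={'count': 1})
print(_shared_memory_get_dict(name='example_state'))  # -> {'count': 1}
_shared_memory_del(name='example_state')
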
Example #25
            print('awwww shiet, we try agen')



#main program
if __name__ == '__main__':

    #initialising variables
    global current_state
    current_state = 0
    client = init_NLP()
    client.sendall(bytes('BlENDER', 'UTF-8'))
    status = client.recv(1024)
    print(status.decode())

    location = shared_memory.ShareableList([False, 0, 0], name='coords') # updated, x , y
    #cv_client = Process(target=init_NLP_2, args=(location,))
    #cv_client.start()
    update_coord = threading.Thread(target=check_coords, args=(location,))
    # cv_update = threading.Thread(target=recv_cords, args=(client,))
    # To get state from server without blocking the main program
    newthread = ClientThread(client)
    newthread.start()
    nlp_control = threading.Thread(target=start_nlp, args=(client, ))
    nlp_control.start()
    update_coord.start()
    #cv_update.start()
    while True:
        if current_state == 0:
            #print("Idle State")
            #do something
Example #26
def loop():
    to_show = ''
    disable = True
    seance_started = False
    questions = [speech_to_text.Question("Ile masz lat?", '', '70'),
                 speech_to_text.Question("Jak masz na imię?", "Jak ci na imię?", "Jan"),
                 speech_to_text.Question("Czy jesteś przyjazny?", 'Jesteś przyjazny?', "Tak"),
                 speech_to_text.Question("Kto cię zabił?", '', "Ty")]
    question = []
    shm_gm = shared_memory.ShareableList(["Bardzo dluga odpowiedz", False])
    game = table.Game(shm_gm.shm.name)
    for q in questions:
        question.append(q.question)
    layout = [
        [sg.Listbox(question, key='-QUESTION-', enable_events=True, size=(53, 17)),
         sg.Multiline(to_show, key='-Text-', size=(53, 18), disabled=True)],
        [sg.Text("Pytanie: "), sg.In(key='-Q-'), sg.Text("Alias: "), sg.In(key='-Al-')],
        [sg.Column([[sg.Text("Odpowiedź: "), sg.In(key='-A-')]], vertical_alignment='center',
                   justification='center')],
        [sg.Column(
            [[sg.Button("Dodaj pytanie", disabled=shm_gm[1]),
              sg.Button("Rozpocznij seans", disabled=shm_gm[1]),
              sg.Button("Uruchom wybrane pytanie", disabled=disable),
              sg.Button("Zakończ seans", disabled=not shm_gm[1])]],
            vertical_alignment='center', justification='center')]
        ]

    window = sg.Window("Zwirtualizowana Tablica Ouija", layout, resizable=False, size=(800, 450))
    shm_gm[0] = ''

    while True:
        event, values = window.read()
        if event == sg.WIN_CLOSED:
            break
        if shm_gm[1]:
            if seance_started:
                if not questions[window.Element('-QUESTION-').Widget.curselection()[0]].answered:
                    disable = False
                    window["Uruchom wybrane pytanie"].update(disabled=disable)
                else:
                    disable = True
                    window["Uruchom wybrane pytanie"].update(disabled=disable)
                window["Dodaj pytanie"].update(disabled=shm_gm[1])
                window["Rozpocznij seans"].update(disabled=shm_gm[1])
                window["Zakończ seans"].update(disabled=not shm_gm[1])
        else:
            for q in questions:
                q.answered = False
            disable = True
            window["Uruchom wybrane pytanie"].update(disabled=disable)
            window["Dodaj pytanie"].update(disabled=shm_gm[1])
            window["Rozpocznij seans"].update(disabled=shm_gm[1])
            window["Zakończ seans"].update(disabled=not shm_gm[1])
        if event == '-QUESTION-':
            to_show = "Pytanie: " + questions[window.Element('-QUESTION-').Widget.curselection()[0]].question + \
                      "\n\nAlias: " + questions[window.Element('-QUESTION-').Widget.curselection()[0]].alias + \
                      "\n\nOdpowiedź: " + questions[window.Element('-QUESTION-').Widget.curselection()[0]].answer.lower()
            window["-Text-"].update(to_show)
        if event == 'Dodaj pytanie':
            # raise an error if question field is not filled
            if not values['-Q-']:
                info_window = sg.Window("Błąd", [[sg.Text("Pole pytanie nie może być puste!")]])
                info_window.read()
            # raise an error if answer field is not filled
            elif not values['-A-']:
                info_window = sg.Window("Błąd", [[sg.Text("Pole odpowiedź nie może być puste!")]])
                info_window.read()
            # nullify alias field if alias was not provided
            elif not values['-Al-']:
                questions.append(speech_to_text.Question(values['-Q-'], '', values['-A-']))
                question.append(values['-Q-'])
                window["-QUESTION-"].update(question)
                window['-Q-'].update('')
                window['-Al-'].update('')
                window['-A-'].update('')
            else:
                questions.append(speech_to_text.Question(values['-Q-'], values['-Al-'], values['-A-']))
                question.append(values['-Q-'])
                window["-QUESTION-"].update(question)
                window['-Q-'].update('')
                window['-Al-'].update('')
                window['-A-'].update('')
        if event == "Rozpocznij seans":
            if len(questions) != 0:
                seance_started = True
                shm_gm[1] = True
                window["Dodaj pytanie"].update(disabled=shm_gm[1])
                window["Rozpocznij seans"].update(disabled=shm_gm[1])
                window["Zakończ seans"].update(disabled=not shm_gm[1])
                # create additional process for table
                Process(target=game.run_game, args=(questions, )).start()
            else:
                info_window = sg.Window("Błąd", [[sg.Text("Przed rozpoczęciem seansu dodaj pytania i odpowiedzi!")]])
                info_window.read()
        if event == "Uruchom wybrane pytanie":
            if not questions[window.Element('-QUESTION-').Widget.curselection()[0]].answered:
                shm_gm[0] = questions[window.Element('-QUESTION-').Widget.curselection()[0]].answer
                questions[window.Element('-QUESTION-').Widget.curselection()[0]].answered = True
        if event == "Zakończ seans":
            shm_gm[1] = False
            seance_started = False
            window["Dodaj pytanie"].update(disabled=shm_gm[1])
            window["Uruchom wybrane pytanie"].update(disabled=disable)
            window["Rozpocznij seans"].update(disabled=shm_gm[1])
            window["Zakończ seans"].update(disabled=not shm_gm[1])
Example #27
def main():

    db_store = 'sentinel.db'
    db_manuf = str(os.path.dirname(__file__)) + '/db/manuf'

    if sys.argv[1:]:

        if sys.argv[1] == '--version':
            print(__version__)
            sys.exit(0)

        if sys.argv[1] == 'manuf':
            mac = sys.argv[2]
            mfname = store.get_manuf(mac, db_manuf)
            print(mfname)
            sys.exit(0)

        if sys.argv[1] == 'arps':
            printArps()
            sys.exit(0)

        if sys.argv[1] == 'list-macs':
            store.print_all(db_store)
            sys.exit(0)

        if sys.argv[1] == 'update-manuf':
            mac = sys.argv[2]
            mfname = store.get_manuf(mac, db_manuf)
            update = store.update_data_manuf(mac, mfname, db_store)
            print(update)
            sys.exit(0)

        if sys.argv[1] == 'rdns':
            ip = sys.argv[2]
            try:
                srv = sys.argv[3]
            except IndexError:
                srv = None
            dnsname = tools.getNSlookup(ip, srv)
            print(dnsname)
            sys.exit(0)

        if sys.argv[1] == 'update-dns':
            mac = sys.argv[2]
            ip = sys.argv[3]
            #dnsname = tools.getDNSName(ip)
            #update = store.update_data_dns(mac, dnsname, db_store)
            import threading
            dns = store.DNSUpDateTask()
            t = threading.Thread(target=dns.run, args=(
                mac,
                ip,
                db_store,
            ))
            t.start()
            #print(t)
            sys.exit(0)

        if sys.argv[1] == 'ping-net':
            ip = sys.argv[2]
            pn = tools.pingNet(ip)
            print(pn)
            sys.exit(0)

        if sys.argv[1] == 'nmap-net':
            ip = sys.argv[2]
            pn = tools.nmapNet(ip)
            print(pn)
            sys.exit(0)

        if sys.argv[1] == 'listening':
            p = tools.printListenPorts()
            sys.exit(0)

        if sys.argv[1] == 'listening-detailed':
            p = tools.printListenPortsDetailed()
            sys.exit(0)

        if sys.argv[1] == 'listening-details':
            port = sys.argv[2]
            p = tools.printLsOfPort(port)
            sys.exit(0)

        if sys.argv[1] == 'listening-allowed':
            p = store.printListeningAllowed(db_store)
            sys.exit(0)

        if sys.argv[1] == 'listening-allow':
            port = sys.argv[2]
            insert = store.insertAllowedPort(port, db_store)
            print(insert)
            sys.exit(0)

        if sys.argv[1] == 'listening-remove':
            port = sys.argv[2]
            remove = store.deleteAllowedPort(port, db_store)
            print(remove)
            sys.exit(0)

        if sys.argv[1] == 'listening-alerts':
            alerts = store.printListeningAlerts(db_store)
            sys.exit(0)

        if sys.argv[1] == 'established':
            established = tools.printEstablished()
            sys.exit(0)

        if sys.argv[1] == 'established-lsof':
            established = tools.printEstablishedLsOf()
            sys.exit(0)

        if sys.argv[1] == 'established-rules':
            established_rules = tools.printEstablishedRules(db_store)
            sys.exit(0)

        if sys.argv[1] == 'established-rule':
            #established-rule proto laddr lport faddr fport
            rule = sys.argv[2]
            proto = sys.argv[3]
            laddr = sys.argv[4]
            lport = sys.argv[5]
            faddr = sys.argv[6]
            fport = sys.argv[7]
            insert_rule = store.insertEstablishedRules(rule, proto, laddr,
                                                       lport, faddr, fport,
                                                       db_store)
            sys.exit(0)

        if sys.argv[1] == 'established-rules-filter':
            print_alerts = tools.printEstablishedRulesMatch(db_store)
            sys.exit(0)

        if sys.argv[1] == 'established-alerts':
            print_alerts = tools.printEstablishedAlerts(db_store)
            sys.exit(0)

        if sys.argv[1] == 'delete-established-rule':
            rowid = sys.argv[2]
            delete = store.deleteFromRowid('established', rowid, db_store)
            print(delete)
            sys.exit(0)

        if sys.argv[1] == 'clear-established-rules':
            clear = store.clearAll('established', db_store)
            print(clear)
            sys.exit(0)

        if sys.argv[1] == 'clear-configs':
            clear = store.clearAll('configs', db_store)
            print(clear)
            sys.exit(0)

        if sys.argv[1] == 'lsof':
            port = sys.argv[2]
            lsof = tools.printLsOfPort(port)
            sys.exit(0)

        if sys.argv[1] == 'nmap':
            ip = sys.argv[2]
            try:
                level = sys.argv[3]
            except IndexError:
                level = 1
            scan = tools.nmapScan(ip, level)
            update = store.replaceNmaps(ip, scan, db_store)
            print(str(update) + ' ' + str(scan))
            sys.exit(0)

        if sys.argv[1] == 'list-ips':
            rows = store.selectAll('ips', db_store)
            for row in rows:
                print(row)
            sys.exit(0)

        if sys.argv[1] == 'add-ip':
            ip = sys.argv[2]
            insert = store.insertIPs(ip, db_store)
            print(insert)
            sys.exit(0)

        if sys.argv[1] == 'del-ip':
            ip = sys.argv[2]
            _del = store.deleteIPs(ip, db_store)
            print(_del)
            sys.exit(0)

        if sys.argv[1] == 'update-ip':
            ip = sys.argv[2]
            data = sys.argv[3]

            try:
                valid_json = json.loads(data)
            except json.decoder.JSONDecodeError:
                print('invalid json')
                sys.exit(1)
            replace = store.replaceINTO('ips', ip, data, db_store)
            print(replace)
            sys.exit(0)

        if sys.argv[1] == 'update-ip-item':
            name = sys.argv[2]
            item = sys.argv[3]
            val = sys.argv[4]
            update = store.updateDataItem(item, val, 'ips', name, db_store)
            print(update)
            sys.exit(0)

        if sys.argv[1] == 'delete-ip-item':
            name = sys.argv[2]
            item = sys.argv[3]
            delete = store.deleteDataItem(item, 'ips', name, db_store)
            print(delete)
            sys.exit(0)

        if sys.argv[1] == 'clear-ips':
            clear = store.clearAll('ips', db_store)
            print(clear)
            sys.exit(0)

        if sys.argv[1] == 'discover-net':
            ipnet = None
            level = None
            try:
                ipnet = sys.argv[2]
                level = sys.argv[3]
            except IndexError:
                pass

            if ipnet is None:
                ipnet = tools.getIfconfigIPv4()
            else:
                i = ipnet.split('.')
                if len(i) == 1:
                    level = sys.argv[2]
                    ipnet = tools.getIfconfigIPv4()
            if level is None:
                level = 1

            run_discovery = tools.runDiscoverNet(ipnet, level, db_store)
            print(run_discovery)
            sys.exit(0)

        if sys.argv[1] == 'list-nmaps':
            scans = store.getNmaps(db_store)
            for row in scans:
                print(row)
            sys.exit(0)

        if sys.argv[1] == 'del-nmap':
            ip = sys.argv[2]
            del_ = store.deleteNmaps(ip, db_store)
            print(del_)
            sys.exit(0)

        if sys.argv[1] == 'clear-nmaps':
            clear = store.clearAllNmaps(db_store)
            print(clear)
            sys.exit(0)

        if sys.argv[1] == 'list-vulns':
            try:
                vid = sys.argv[2]
            except IndexError:
                vid = None
            run = tools.printVulnScan(db_store, vid)
            sys.exit(0)

        if sys.argv[1] == 'del-vuln':
            ip = sys.argv[2]
            del_ = store.deleteVulns(ip, db_store)
            print(del_)
            sys.exit(0)

        if sys.argv[1] == 'clear-vulns':
            clear = store.clearAllVulns(db_store)
            print(clear)
            sys.exit(0)

        if sys.argv[1] == 'check-vuln':
            vid = sys.argv[2]
            data = store.getVulnData(vid, db_store)
            run = tools.processVulnData(data)
            print(run)
            sys.exit(0)

        if sys.argv[1] == 'email-vuln':
            vid = sys.argv[2]
            data = store.getVulnData(vid, db_store)
            subject = 'sentinel vuln-scan'
            email = tools.sendEmail(subject, data, db_store)
            print(email)
            sys.exit(0)

        if sys.argv[1] == 'myip':
            myip = tools.getIfconfigIPv4()
            print(myip)
            sys.exit(0)

        if sys.argv[1] == 'udp':
            ip = sys.argv[2]
            port = sys.argv[3]
            run = tools.nmapUDP(ip, port)
            print(run)
            sys.exit(0)

        if sys.argv[1] == 'udpscan':
            ip = port = None
            try:
                ip = sys.argv[2]
                port = sys.argv[3]
            except IndexError:
                pass
            run = tools.nmapUDPscan(ip, port)
            print(run)
            sys.exit(0)

        if sys.argv[1] == 'tcp':
            ip = sys.argv[2]
            port = sys.argv[3]
            run = tools.nmapTCP(ip, port)
            print(run)
            sys.exit(0)

        if sys.argv[1] == 'list-detects':
            try:
                id_ = sys.argv[2]
            except IndexError:
                id_ = None
            run = tools.printDetectScan(db_store, id_)
            sys.exit(0)

        if sys.argv[1] == 'detect-scan':
            ip = sys.argv[2]
            scan = tools.nmapDetectScanStore(ip, db_store)
            print(str(scan))
            sys.exit(0)

        if sys.argv[1] == 'del-detect':
            id_ = sys.argv[2]
            del_ = store.deleteDetect(id_, db_store)
            print(del_)
            sys.exit(0)

        if sys.argv[1] == 'clear-detects':
            clear = store.clearAllDetects(db_store)
            print(clear)
            sys.exit(0)

        if sys.argv[1] == 'port-scan':
            ipnet = None
            level = 1
            try:
                ipnet = sys.argv[2]
                level = sys.argv[3]
            except IndexError:
                pass

            if ipnet is None:
                myip = tools.getIfconfigIPv4()
                ipn = tools.getIpNet(myip)
                print('discover net: ' + str(ipn))
                ipnet = tools.nmapNet(ipn)
            else:
                i = ipnet.split('.')

                if len(i) == 1:
                    level = sys.argv[2]
                    myip = tools.getIfconfigIPv4()
                    ipn = tools.getIpNet(myip)
                    print('discover net: ' + str(ipn))
                    ipnet = tools.nmapNet(ipn)
                else:
                    if tools.isNet(ipnet):
                        print('discover net: ' + str(ipnet))
                        ipnet = tools.nmapNet(ipnet)

            if type(ipnet) == str:
                ipnet = ipnet.split()
            scan = tools.runNmapScanMultiProcess(ipnet, level, db_store)
            print(scan)
            sys.exit(0)

        if sys.argv[1] == 'vuln-scan':
            try:
                ipnet = sys.argv[2]
            except IndexError:
                ipnet = None

            if ipnet is None:
                myip = tools.getIfconfigIPv4()
                ipn = tools.getIpNet(myip)
                print('discover net: ' + str(ipn))
                ipnet = tools.nmapNet(ipn)
            else:
                if tools.isNet(ipnet):
                    print('discover net: ' + str(ipnet))
                    ipnet = tools.nmapNet(ipnet)

            if type(ipnet) == str:
                ipnet = ipnet.split()

            scan = tools.runNmapVulnMultiProcess(ipnet, db_store)
            print(scan)
            sys.exit(0)

        if sys.argv[1] == 'detect-scan-net':
            ipnet = None
            try:
                ipnet = sys.argv[2]
            except IndexError:
                pass
            if ipnet is None:
                ipnet = tools.getIfconfigIPv4()
            ipn = tools.getIpNet(ipnet)
            print('ipnet: ' + ipn)
            hostLst = tools.nmapNet(ipn)
            scan = tools.runNmapDetectMultiProcess(hostLst, db_store)
            print(scan)
            sys.exit(0)

        if sys.argv[1] == 'list-configs':
            run = tools.printConfigs(db_store)
            print(run)
            sys.exit(0)

        if sys.argv[1] == 'update-config':
            name = sys.argv[2]
            data = sys.argv[3]
            try:
                valid_json = json.loads(data)
            except json.decoder.JSONDecodeError:
                print('invalid json')
                sys.exit(1)
            run = store.replaceINTO('configs', name, data, db_store)
            print(run)
            sys.exit(0)

        if sys.argv[1] == 'delete-config':
            rowid = sys.argv[2]
            run = store.deleteFrom('configs', rowid, db_store)
            print(run)
            sys.exit(0)

        if sys.argv[1] == 'list-rules':
            rows = store.selectAll('rules', db_store)
            for row in rows:
                print(row)
            sys.exit(0)

        if sys.argv[1] == 'update-rule':
            name = sys.argv[2]
            data = sys.argv[3]
            try:
                valid_json = json.loads(data)
            except json.decoder.JSONDecodeError:
                print('invalid json')
                sys.exit(1)
            run = store.replaceINTO('rules', name, data, db_store)
            print(run)
            sys.exit(0)

        if sys.argv[1] == 'delete-rule':
            name = sys.argv[2]
            run = store.deleteFrom('rules', name, db_store)
            print(run)
            sys.exit(0)

        if sys.argv[1] == 'clear-rules':
            clear = store.clearAll('rules', db_store)
            print(clear)
            sys.exit(0)

        if sys.argv[1] == 'list-jobs':
            rows = store.selectAll('jobs', db_store)
            for row in rows:
                print(row)
            sys.exit(0)

        if sys.argv[1] == 'update-job':
            name = sys.argv[2]
            data = sys.argv[3]
            try:
                valid_json = json.loads(data)
            except json.decoder.JSONDecodeError:
                print('invalid json')
                sys.exit(1)
            run = store.replaceINTO('jobs', name, data, db_store)
            print(run)
            sys.exit(0)

        if sys.argv[1] == 'delete-job':
            name = sys.argv[2]
            run = store.deleteJob(name, db_store)
            print(run)
            sys.exit(0)

        if sys.argv[1] == 'clear-jobs':
            clear = store.clearAllJobs(db_store)
            print(clear)
            sys.exit(0)

#
# run-job name
#        if sys.argv[1] == 'run-job':
#            name = sys.argv[2]
#            run = tools.runJob(name, db_store)
#            print(str(run))
#            sys.exit(0)
#def runJob(name, db_store, gDict):

        if sys.argv[1] == 'sentry':
            try:
                v = sys.argv[2]
            except IndexError:
                v = False
            run = tools.sentryMode(db_store, verbose=v)
            print(str(run))
            sys.exit(0)


#
# list-jobs-running

#if sys.argv[1] == 'list-jobs-running':
#    run = tools.listRunning(db_store)
#    sys.exit(0)

#def listRunning(db_store):
#    rows = store.getAllCounts(db_store)
#    for row in rows:
#        print(row)
#    return True

#def getAllCounts(db_file):
#    con = sql_connection(db_file)
#    cur = con.cursor()
#    cur.execute("SELECT * FROM counts;")
#    rows = cur.fetchall()
#    return rows

        if sys.argv[1] == 'b2sum':
            _file = sys.argv[2]
            b2sum = tools.b2sum(_file)
            print(_file + ' ' + b2sum)
            sys.exit(0)

        if sys.argv[1] == 'b2sum-fim':
            try:
                name = sys.argv[2]
            except IndexError:
                name = None
            if name is None:
                fims = store.selectAll('fims', db_store)
                for i in fims:
                    name = i[0]
                    run = tools.b2sumFim(name, db_store)
                    print(str(name) + ' ' + str(run))
            else:
                run = tools.b2sumFim(name, db_store)
                print(str(run))
            sys.exit(0)

        if sys.argv[1] == 'check-fim':
            try:
                name = sys.argv[2]
            except IndexError:
                name = None
            if name is None:
                fims = store.selectAll('fims', db_store)
                for i in fims:
                    name = i[0]
                    run = tools.printFim(name, db_store)
                    print(str(name) + ' ' + str(run))
            else:
                run = tools.printFim(name, db_store)
                print(str(run))
            sys.exit(0)

        if sys.argv[1] == 'list-fims':
            run = tools.printAllFims(db_store)
            print(run)
            sys.exit(0)

        if sys.argv[1] == 'list-fims-changed':
            run = tools.printAllFimsChanged(db_store)
            print(run)
            sys.exit(0)

        if sys.argv[1] == 'add-fim':
            name = sys.argv[2]
            _file = sys.argv[3]
            add = tools.addFimFile(name, _file, db_store)
            print(str(add))
            sys.exit(0)

        if sys.argv[1] == 'del-fim':
            name = sys.argv[2]
            _file = sys.argv[3]
            add = tools.delFimFile(name, _file, db_store)
            print(str(add))
            sys.exit(0)

        if sys.argv[1] == 'update-fim':
            name = sys.argv[2]
            data = sys.argv[3]
            try:
                valid_json = json.loads(data)
            except json.decoder.JSONDecodeError:
                print('invalid json')
                sys.exit(1)
            run = store.replaceINTO('fims', name, data, db_store)
            print(run)
            sys.exit(0)

        if sys.argv[1] == 'list-reports':
            reports = store.selectAll('reports', db_store)
            for row in reports:
                print(row)
            sys.exit(0)

        if sys.argv[1] == 'delete-report':
            name = sys.argv[2]
            delete = store.deleteFrom('reports', name, db_store)
            print(delete)
            sys.exit(0)

        if sys.argv[1] == 'clear-reports':
            clear = store.clearAll('reports', db_store)
            print(clear)
            sys.exit(0)

        if sys.argv[1] == 'update-report':
            name = sys.argv[2]
            data = sys.argv[3]
            try:
                valid_json = json.loads(data)
            except json.decoder.JSONDecodeError:
                print('invalid json')
                sys.exit(1)
            run = store.replaceINTO('reports', name, data, db_store)
            print(run)
            sys.exit(0)

        if sys.argv[1] == 'list-alerts':
            alerts = store.selectAll('alerts', db_store)
            for row in alerts:
                print(row)
            sys.exit(0)

        if sys.argv[1] == 'delete-alert':
            name = sys.argv[2]
            delete = store.deleteFrom('alerts', name, db_store)
            print(delete)
            sys.exit(0)

        if sys.argv[1] == 'clear-alerts':
            clear = store.clearAll('alerts', db_store)
            print(clear)
            sys.exit(0)

        if sys.argv[1] == 'update-alert':
            name = sys.argv[2]
            data = sys.argv[3]
            try:
                valid_json = json.loads(data)
            except json.decoder.JSONDecodeError:
                print('invalid json')
                sys.exit(1)
            run = store.replaceINTO('alerts', name, data, db_store)
            print(run)
            sys.exit(0)

        if sys.argv[1] == 'run-alert':
            name = sys.argv[2]
            run = tools.runAlert(name, db_store)
            print(str(run))
            sys.exit(0)

        if sys.argv[1] == 'run-create-db':
            run = store.createDB(db_store)
            print(str(run))
            sys.exit(0)

        if sys.argv[1] == 'run-ps':
            #import modules.ps.ps
            #run = modules.ps.ps.get_ps()
            from .modules.ps import ps
            run = ps.get_ps()
            print(run)
            sys.exit(0)

        if sys.argv[1] == 'list-jobs-available':
            for k, v in tools.options.items():
                print(k)
            sys.exit(0)

        if sys.argv[1] == 'list-counts':
            reports = store.selectAll('counts', db_store)
            for row in reports:
                print(row)
            sys.exit(0)

        if sys.argv[1] == 'clear-counts':
            clear = store.clearAll('counts', db_store)
            print(clear)
            sys.exit(0)

        if sys.argv[1] == 'list-proms':
            _prom = str(db_store) + '.prom'
            with open(_prom, 'r') as _file:
                lines = _file.readlines()
                for line in lines:
                    print(line.strip('\n'))
            sys.exit(0)

        if sys.argv[1] == 'list-proms-db':
            proms = store.selectAll('proms', db_store)
            for row in proms:
                print(row)
            sys.exit(0)

        if sys.argv[1] == 'clear-proms-db':
            clear = store.clearAll('proms', db_store)
            print(clear)
            sys.exit(0)

        if sys.argv[1] == 'update-prom-db':
            name = sys.argv[2]
            data = sys.argv[3]
            run = store.replaceINTOproms(name, data, db_store)
            print(run)
            sys.exit(0)

        if sys.argv[1] == 'file-type':
            #import modules.gitegridy.gitegridy as git
            from .modules.gitegridy import gitegridy as git
            _file = sys.argv[2]
            file_type = git.fileType(_file)
            print(file_type)
            sys.exit(0)

        if sys.argv[1] == 'add-file':
            _file = sys.argv[2]
            store_file = store.storeFile(_file, db_store)
            print(store_file)
            sys.exit(0)

        if sys.argv[1] == 'del-file':
            _file = sys.argv[2]
            unstore_file = store.unstoreFile(_file, db_store)
            print(unstore_file)
            sys.exit(0)

        if sys.argv[1] == 'list-files':
            list_files = store.selectAll('files', db_store)
            for row in list_files:
                print(row[0], row[1])
            sys.exit(0)

        if sys.argv[1] == 'clear-files':
            clear = store.clearAll('files', db_store)
            print(clear)
            sys.exit(0)

        if sys.argv[1] == 'fim-diff':
            _file = sys.argv[2]
            fim_diff = tools.fimDiff(_file, db_store)
            print(fim_diff)
            sys.exit(0)

        if sys.argv[1] == 'fim-restore':
            _file = sys.argv[2]
            try:
                _dest = sys.argv[3]
            except IndexError:
                _dest = None

            store_file_ = store.getData('files', _file, db_store)
            store_file_blob = store_file_[0]

            if _dest:
                dest = _dest
            else:
                dest = _file

            with open(dest, 'wb+') as outfile:
                outfile.write(store_file_blob)

            print('fim-restore ' + dest)
            sys.exit(0)

        if sys.argv[1] == 'av-scan':
            filedir = sys.argv[2]
            av_scan = tools.avScan(filedir, db_store)
            print(av_scan)
            sys.exit(0)

        if sys.argv[1] == 'tail':
            _file = sys.argv[2]
            for line in tools.tail(_file):
                print(line)
            sys.exit(0)

        if sys.argv[1] == 'logstream':
            for line in tools.logstream():
                print(line)
            sys.exit(0)

        if sys.argv[1] == 'logstream-json':
            for line in tools.logstream():
                print(line.decode('utf-8'))
            sys.exit(0)

        if sys.argv[1] == 'logstream-keys':
            for line in tools.logstream():
                jline = json.loads(line.decode('utf-8'))
                n = len(jline.keys())
                print(n, ' ', jline.keys())
            sys.exit(0)

        if sys.argv[1] == 'list-b2sums':
            rows = store.selectAll('b2sum', db_store)
            for row in rows:
                print(row)
            sys.exit(0)

        if sys.argv[1] == 'clear-b2sums':
            clear = store.clearAll('b2sum', db_store)
            print(clear)
            sys.exit(0)

        if sys.argv[1] == 'list-sshwatch':
            rows = store.selectAll('sshwatch', db_store)
            for row in rows:
                print(row)
            sys.exit(0)

        if sys.argv[1] == 'clear-sshwatch':
            clear = store.clearAll('sshwatch', db_store)
            print(clear)
            sys.exit(0)

        if sys.argv[1] == 'clear-training':
            clear = store.clearAll('training', db_store)
            print(clear)
            sys.exit(0)

        if sys.argv[1] == 'list-training':
            try:
                _id = sys.argv[2]
            except IndexError:
                _id = None

            if _id:

                if _id == 'tags':
                    _tag = sys.argv[3]
                    rows = store.getAllTrainingTags(_tag, db_store)
                    for row in rows:
                        print(row)
                else:
                    row = store.getByID('training', _id, db_store)
                    print(row)

            else:
                rows = store.getAll('training', db_store)
                for row in rows:
                    print(row)
            sys.exit(0)

        if sys.argv[1] == 'update-training':
            tag = sys.argv[2]
            data = sys.argv[3]
            try:
                valid_json = json.loads(data)
            except json.decoder.JSONDecodeError:
                print('invalid json')
                sys.exit(1)
            run = store.updateTraining(tag, data, db_store)
            print(run)
            sys.exit(0)

        if sys.argv[1] == 'update-training-tag':
            _id = sys.argv[2]
            tag = sys.argv[3]
            run = store.updateTrainingTag(_id, tag, db_store)
            print(run)
            sys.exit(0)

        if sys.argv[1] == 'delete-training':
            rowid = sys.argv[2]
            delete = store.deleteFromRowid('training', rowid, db_store)
            print(delete)
            sys.exit(0)

        if sys.argv[1] == 'sample-logstream':
            count = sys.argv[2]
            run = tools.sampleLogStream(count, db_store)
            sys.exit(0)

        if sys.argv[1] == 'mark-training':
            tag = sys.argv[2]
            run = store.markAllTraining(tag, db_store)
            print(run)
            sys.exit(0)

        if sys.argv[1] == 'mark-training-on':
            name = sys.argv[2]
            run = tools.markTrainingRe(name, db_store)
            sys.exit(0)

        #list-occurrence [name|-gt,-lt,-eq num]
        if sys.argv[1] == 'list-occurrence':

            try:
                opn = sys.argv[2]
            except IndexError:
                opn = None
            try:
                val = sys.argv[3]
            except IndexError:
                val = None

            if val:
                rows = store.getByOp('occurrence', opn, val, db_store)
                for row in rows:
                    print(row)
            elif opn:
                row = store.getByName('occurrence', opn, db_store)
                print(row)
            else:
                rows = store.selectAll('occurrence', db_store)
                for row in rows:
                    print(row)
            sys.exit(0)

        if sys.argv[1] == 'clear-occurrence':
            clear = store.clearAll('occurrence', db_store)
            print(clear)
            sys.exit(0)

        if sys.argv[1] == 'delete-occurrence':
            name = sys.argv[2]
            delete = store.deleteFrom('occurrence', name, db_store)
            print(delete)
            sys.exit(0)

        if sys.argv[1] == 'copy-occurrence':
            name = sys.argv[2]
            _copy = store.copyOccurrenceToTraining(name, db_store)
            print(_copy)
            sys.exit(0)

        if sys.argv[1] == 'list-system-profile-full':
            rows = store.getAll('system_profile', db_store)
            for row in rows:
                print(row)
            sys.exit(0)

        if sys.argv[1] == 'list-system-profile':
            #name = sys.argv[2]
            #rows = store.selectAll('system_profile', db_store)
            rows = store.getAll('system_profile', db_store)
            for row in rows:
                print(row[0], row[1], row[2])
            sys.exit(0)

        if sys.argv[1] == 'gen-system-profile':
            run = tools.genSystemProfile(db_store)
            print(run)
            sys.exit(0)

        if sys.argv[1] == 'del-system-profile-name':
            name = sys.argv[2]
            delete = store.deleteFrom('system_profile', name, db_store)
            print(delete)
            sys.exit(0)

        if sys.argv[1] == 'del-system-profile-rowid':
            rowid = sys.argv[2]
            delete = store.deleteFromRowid('system_profile', rowid, db_store)
            print(delete)
            sys.exit(0)

        if sys.argv[1] == 'clear-system-profile':
            clear = store.clearAll('system_profile', db_store)
            print(clear)
            sys.exit(0)

        if sys.argv[1] == 'get-system-profile-name':
            name = sys.argv[2]
            get = store.getByName('system_profile', name, db_store)
            print(get)
            sys.exit(0)

        if sys.argv[1] == 'get-system-profile-rowid':
            rowid = sys.argv[2]
            get = store.getByID('system_profile', rowid, db_store)
            print(get)
            sys.exit(0)

        if sys.argv[1] == 'diff-system-profile-rowid':
            rowid1 = sys.argv[2]
            rowid2 = sys.argv[3]
            diff = tools.diffSystemProfileIDs(rowid1, rowid2, db_store)
            print(diff)
            sys.exit(0)

        if sys.argv[1] == 'get-system-profile-data':
            rowid = sys.argv[2]
            data = sys.argv[3]
            get = tools.getSystemProfileData(rowid, data, db_store)
            print(get)
            sys.exit(0)

        #if sys.argv[1] == 'expire-key':
        #    _key = sys.argv[2]
        #    expire = tools.setExpiregDictKeyFile(_key, db_store)
        #    print(expire)
        #    sys.exit(0)

        #if sys.argv[1] == 'expire-key':
        #    _key = sys.argv[2:]
        #    from multiprocessing import shared_memory
        #    l = shared_memory.ShareableList([_key], name='sentinel')
        #    import time
        #    time.sleep(5)
        #    l.shm.close()
        #    l.shm.unlink()
        #    sys.exit(0)

        if sys.argv[1] == 'expire-keys':
            Keys = sys.argv[2:]

            from multiprocessing import shared_memory
            l = shared_memory.ShareableList(Keys, name='sentinel-update')
            import time
            time.sleep(5)
            l.shm.close()
            l.shm.unlink()
            sys.exit(0)
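            # Consumer-side sketch (illustrative; assumes the sentinel daemon
            # attaches to the same 'sentinel-update' name during the 5 second
            # window above):
            #   reader = shared_memory.ShareableList(name='sentinel-update')
            #   keys_to_expire = list(reader)
            #   reader.shm.close()  # the attaching side closes but never unlinks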

        if sys.argv[1] == 'get-key':
            key = sys.argv[2]
            from multiprocessing import shared_memory
            l = shared_memory.ShareableList(name='sentinel-shm')
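            # The list holds flat key/value pairs: even indices are keys,
            # odd indices the matching values.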

            for i in range(0, len(l), 2):
                _key = l[i]
                _val = l[i + 1]
                if _key == key:
                    print(_val.rstrip())

            l.shm.close()
            l.shm.unlink()
            sys.exit(0)

        if sys.argv[1] == 'list-keys':
            from multiprocessing import shared_memory
            l = shared_memory.ShareableList(name='sentinel-shm')
            # Entries alternate key, value; print the keys and skip the values.
            il = iter(l)
            for item in il:
                print(item)
                next(il)
            l.shm.close()
            l.shm.unlink()
            sys.exit(0)

        if sys.argv[1] == 'list-vals':
            from multiprocessing import shared_memory
            l = shared_memory.ShareableList(name='sentinel-shm')
            # Entries alternate key, value; print only the values.
            for i in range(0, len(l), 2):
                val = l[i + 1]
                print(val)
            l.shm.close()
            l.shm.unlink()
            sys.exit(0)

        if sys.argv[1] == 'list-keys-metric':
            from multiprocessing import shared_memory
            l = shared_memory.ShareableList(name='sentinel-shm')
            # Entries alternate key, value; print each key together with the
            # last whitespace-separated field of its value (the metric).
            il = iter(l)
            for item in il:
                n = next(il)
                print(item, n.split()[-1])

            l.shm.close()
            l.shm.unlink()
            sys.exit(0)

        #if sys.argv[1] == 'get-shm':
        #    from multiprocessing import shared_memory
        #    l = shared_memory.ShareableList(name='sentinel-update')
        #    #print(l)
        #    for item in l:
        #        print(item)
        #    l.shm.close()
        #    l.shm.unlink()
        #    sys.exit(0)

        else:
            usage()
            sys.exit(0)
    else:
        arpTbl = tools.getArps()
        update = store.update_arp_data(db_store, arpTbl, db_manuf)
        print(update)
        sys.exit(0)
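For context, the get-key / list-keys / list-vals commands above only attach to an existing 'sentinel-shm' block; the publisher side is not shown. A minimal, hypothetical publisher sketch (only the flat key/value layout is taken from the commands above) might look like this:

from multiprocessing import shared_memory

def publish_status(pairs):
    # Flatten a dict into [key1, val1, key2, val2, ...] so readers can walk
    # the list two entries at a time, as the CLI commands above do.
    flat = [item for kv in pairs.items() for item in kv]
    shl = shared_memory.ShareableList(flat, name='sentinel-shm')
    return shl  # the publisher keeps the block alive and unlinks it on shutdown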
Ejemplo n.º 28
0
from multiprocessing import shared_memory
from timeit import timeit

def test(size):
    data = [1 for i in range(size)]
    dataM = shared_memory.ShareableList(data, name=f"data{size}")
    print(f'''{timeit("max(data)", globals=locals(), number=1):>.3e}''')
    print(f'''{timeit("max(dataM)", globals=locals(), number=1):>.3e}''')
    dataM.shm.close()  # free the shared memory block once timed
    dataM.shm.unlink()
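A possible driver for this benchmark (not part of the original snippet): each element access on a ShareableList goes through struct packing/unpacking, so max(dataM) is expected to be noticeably slower than max(data) on the plain list.

if __name__ == '__main__':
    for n in (10_000, 100_000):
        test(n)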
Ejemplo n.º 29
0
    async def listen_to_network(self):
        ''' First connects to the network and creates the async listener
            variables, then initializes the variables needed to listen to
            the CAN messages. When the while loop is cancelled, the
            listener is stopped.

            Returns void.
        '''
        self.connect_to_network()
        self._starting_async_listener()
        # Uses a circular buffer which overwrites the oldest entry
        # with new data once the buffer is full.
        index = 0
        buffer_full = False
        try:
            while True:
                can_msg = await self.reader.get_message()
                # Sleep 0.1 seconds so the loop does not try to read all
                # messages at once.
                await asyncio.sleep(0.1)
                self.buffered_data[index] = {
                    'timestamp': can_msg.timestamp,
                    'arbitration_id': can_msg.arbitration_id,
                    'data': (can_msg.data).hex(' '),
                    'channel': can_msg.channel
                }

                index = (index + 1) % self.can_logging_buffer
                if (index == 0):
                    # Buffer is now completely full with can messages.
                    buffer_full = True
                # At every n-th index update shared memory for HTTP server
                if (index % self.can_logging['shm_update_interval_threshold']
                        == 0):
                    # First close open shared memory
                    if (self.shared_list is not None):
                        self.shared_list.shm.close()
                        self.shared_list.shm.unlink()
                    # Buffer not yet full: share only the filled slice
                    # and skip the trailing None entries.
                    if (not buffer_full):
                        # Compresses buffered data
                        temp_buff_data = zl.compress(
                            str(self.buffered_data[:index].tolist()).encode(
                                'UTF-8'), 2)
                    else:
                        # Compresses buffered data
                        temp_buff_data = zl.compress(
                            str(self.buffered_data.tolist()).encode('UTF-8'),
                            2)
                    try:
                        # Try to share buffered data
                        self.shared_list = shared_memory.ShareableList(
                            [temp_buff_data], name='shm_buff_data')
                    except FileExistsError:
                        # The old shared memory block is still open, so
                        # close and unlink it before recreating.
                        temp_shm = shared_memory.ShareableList(
                            name='shm_buff_data')
                        temp_shm.shm.close()
                        temp_shm.shm.unlink()
                        self.shared_list = shared_memory.ShareableList(
                            [temp_buff_data], name='shm_buff_data')

        except KeyboardInterrupt:
            self._closing_async_listener()
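The HTTP-server side that consumes this buffer is not part of the snippet; a minimal reader sketch, assuming the same 'shm_buff_data' name and the zlib-compressed payload written above, could be:

import zlib as zl
from multiprocessing import shared_memory

def read_buffered_can_data():
    # Attach to the block published by listen_to_network() and decompress it.
    shl = shared_memory.ShareableList(name='shm_buff_data')
    raw = zl.decompress(shl[0]).decode('UTF-8')
    shl.shm.close()  # the reader only closes; the writer owns unlink()
    return raw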
Ejemplo n.º 30
0
    st = s.settimeout
    ste = socket.timeout
    rdwr = socket.SHUT_RDWR
    se = socket.error

    #Initialisation of address
    print("READY!")
    addr = ["NC", "NC"]

    #Now the Actual Code
    #Run Before Connect
    l = Lock()
    smi = shared_memory.SharedMemory(create=True, size=max_img_size)  #Image
    smo = shared_memory.SharedMemory(
        create=True, size=3)  #Currently capturing signal, Quality, Stop signal
    smf = shared_memory.ShareableList([0.0])  #Frame rate (float slot; ShareableList keeps each slot's type)
    sms = shared_memory.ShareableList([max_img_size])  #Image size

    #Setting default parameters
    smo.buf[0] = False  #Currently capturing signal
    smo.buf[1] = 35  #Quality
    smo.buf[2] = False  #Stop signal
    smf[0] = 1 / 60  #Frame rate (seconds per frame)

    #Making sure the client doesn't break on the first connection attempt
    img = Image.new('RGB', (100, 100))
    with io.BytesIO() as output:
        img.save(output, format="JPEG", quality=35)
        content = output.getvalue()
        y = len(content)
        sms[0] = y  #Set Image Size