Example #1
def main():
  global args
  global debug
  global verboseDebug
  global debugToggled
  global pdbFlagged
  global shuttingDown

  parser = argparse.ArgumentParser(description=scriptName, add_help=False, usage='{} <arguments>'.format(scriptName))
  parser.add_argument('-v', '--verbose', dest='debug', help="Verbose output", metavar='true|false', type=str2bool, nargs='?', const=True, default=False, required=False)
  parser.add_argument('--extra-verbose', dest='verboseDebug', help="Super verbose output", metavar='true|false', type=str2bool, nargs='?', const=True, default=False, required=False)
  parser.add_argument('--ignore-existing', dest='ignoreExisting', help="Ignore preexisting files in the monitor directory", metavar='true|false', type=str2bool, nargs='?', const=True, default=False, required=False)
  parser.add_argument('--start-sleep', dest='startSleepSec', help="Sleep for this many seconds before starting", metavar='<seconds>', type=int, default=0, required=False)
  parser.add_argument('-r', '--recursive-directory', dest='recursiveDir', help="If specified, monitor all directories with this name underneath --directory", metavar='<name>', type=str, required=False)
  parser.add_argument('--min-bytes', dest='minBytes', help="Minimum size for checked files", metavar='<bytes>', type=int, default=MINIMUM_CHECKED_FILE_SIZE_DEFAULT, required=False)
  parser.add_argument('--max-bytes', dest='maxBytes', help="Maximum size for checked files", metavar='<bytes>', type=int, default=MAXIMUM_CHECKED_FILE_SIZE_DEFAULT, required=False)
  requiredNamed = parser.add_argument_group('required arguments')
  requiredNamed.add_argument('-d', '--directory', dest='baseDir', help='Directory to monitor', metavar='<directory>', type=str, required=True)

  try:
    parser.error = parser.exit
    args = parser.parse_args()
  except SystemExit:
    parser.print_help()
    exit(2)

  verboseDebug = args.verboseDebug
  debug = args.debug or verboseDebug
  if debug:
    eprint(os.path.join(scriptPath, scriptName))
    eprint("{} arguments: {}".format(scriptName, sys.argv[1:]))
    eprint("{} arguments: {}".format(scriptName, args))
  else:
    sys.tracebacklimit = 0

  # handle sigint and sigterm for graceful shutdown
  signal.signal(signal.SIGINT, shutdown_handler)
  signal.signal(signal.SIGTERM, shutdown_handler)
  signal.signal(signal.SIGUSR1, pdb_handler)
  signal.signal(signal.SIGUSR2, debug_toggle_handler)

  # sleep for a bit if requested
  sleepCount = 0
  while (not shuttingDown) and (sleepCount < args.startSleepSec):
    time.sleep(1)
    sleepCount += 1

  # register a process_* method on the EventWatcher class for each event type to watch
  for method in EventWatcher._methods:
    event_process_generator(EventWatcher, method)

  # if directory to monitor doesn't exist, create it now
  if os.path.isdir(args.baseDir):
    preexistingDir = True
  else:
    preexistingDir = False
  if debug: eprint(f'{scriptName}: creating "{args.baseDir}" to monitor')
    pathlib.Path(args.baseDir).mkdir(parents=False, exist_ok=True)

  # if recursion was requested, get list of directories to monitor
  watchDirs = []
  while (len(watchDirs) == 0):
    if args.recursiveDir is None:
      watchDirs = [args.baseDir]
    else:
      watchDirs = glob.glob(f'{args.baseDir}/**/{args.recursiveDir}', recursive=True)

  # begin threaded watch of path(s)
  time.sleep(1)
  watch_manager = pyinotify.WatchManager()
  event_notifier = pyinotify.ThreadedNotifier(watch_manager, EventWatcher())
  for watchDir in watchDirs:
    watch_manager.add_watch(os.path.abspath(watchDir), pyinotify.ALL_EVENTS)
  if debug: eprint(f"{scriptName}: monitoring {watchDirs}")
  time.sleep(2)
  event_notifier.start()

  # if there are any preexisting files (and not ignoreExisting), "touch" them so they generate inotify events
  if preexistingDir and (not args.ignoreExisting):
    filesTouched = 0
    for watchDir in watchDirs:
      for preexistingFile in [str(x) for x in pathlib.Path(watchDir).iterdir() if x.is_file()]:
        touch(preexistingFile)
        filesTouched += 1
    if debug and (filesTouched > 0):
      eprint(f"{scriptName}: found {filesTouched} preexisting files to check")

  # loop forever, or until we're told to shut down, whichever comes first
  while (not shuttingDown):
    if pdbFlagged:
      pdbFlagged = False
      breakpoint()
    time.sleep(0.2)

  # graceful shutdown
  if debug: eprint(f"{scriptName}: shutting down...")
  event_notifier.stop()
  time.sleep(1)

  if debug: eprint(f"{scriptName}: finished monitoring {watchDirs}")
Example #2
    def __init__(self):
        Watcher.__init__(self)

        self.wm = pyinotify.WatchManager()
        self.notifier = None
        self.callback = None
Example #3
def create_notifier(attrs):
    """Create a notifier from the specified configuration attributes *attrs*."""
    tmask = (pyinotify.IN_CLOSE_WRITE | pyinotify.IN_MOVED_TO
             | pyinotify.IN_CREATE)

    wm_ = pyinotify.WatchManager()

    opath, ofile = os.path.split(globify(attrs["origin"]))

    def fun(pathname):
        """Execute unpacking and copying/moving of *pathname*."""
        efile = os.path.basename(pathname)
        if fnmatch.fnmatch(efile, ofile):
            LOGGER.info("We have a match: %s", str(pathname))
            if attrs["compression"]:
                try:
                    unpack_fun = eval(attrs["compression"])
                    if "prog" in attrs:
                        new_path = unpack_fun(pathname,
                                              attrs["working_directory"],
                                              attrs["prog"])
                    else:
                        new_path = unpack_fun(pathname,
                                              attrs["working_directory"])
                except Exception:
                    LOGGER.exception("Could not decompress %s", pathname)
                    return

            else:
                new_path = pathname
            try:
                move_it(new_path, attrs["destinations"],
                        attrs.get("copy_hook", None))
            except Exception:
                LOGGER.error("Something went wrong during copy of %s",
                             pathname)
            else:
                if attrs["delete"]:
                    try:
                        os.remove(pathname)
                        if attrs["delete_hook"]:
                            attrs["delete_hook"](pathname)
                        LOGGER.debug("Removed %s", pathname)
                    except OSError as e__:
                        if e__.errno == 2:
                            LOGGER.debug("Already deleted: %s", pathname)
                        else:
                            raise

            # delete temporary file
            if pathname != new_path:
                try:
                    os.remove(new_path)
                except OSError as e__:
                    if e__.errno == 2:
                        pass
                    else:
                        raise

    tnotifier = pyinotify.ThreadedNotifier(wm_, EventHandler(fun))

    wm_.add_watch(opath, tmask)

    return tnotifier
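
A minimal usage sketch for create_notifier(); the attrs keys are inferred from the function body above, and the concrete values are illustrative assumptions:

attrs = {"origin": "/data/incoming/*.tif",    # pattern handed to globify()
         "compression": None,                 # falsy, so the unpacking branch is skipped
         "working_directory": "/tmp",
         "destinations": ["/data/processed"],
         "delete": False}
notifier = create_notifier(attrs)
notifier.start()  # ThreadedNotifier handles events on its own thread
notifier.stop()   # on shutdown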
Example #4
        "IN_MODIFY",
        "IN_MOVE_SELF",
        "IN_MOVED_FROM",
        "IN_MOVED_TO",
        "IN_Q_OVERFLOW",
        "IN_UNMOUNT",
        "default",
    ]


def process_generator(cls, method):
    def _method_name(self, event):
        print(
            textwrap.dedent('''\
        Method name: process_{}()
        Path name: {}
        Event name: {}
        '''.format(method, event.pathname, event.maskname)))

    _method_name.__name__ = "process_{}".format(method)
    setattr(cls, _method_name.__name__, _method_name)


for method in EventProcessor._methods:
    process_generator(EventProcessor, method)

watch_manager = pyinotify.WatchManager()
watch_manager.add_watch("/tmp", pyinotify.ALL_EVENTS)
event_notifier = pyinotify.Notifier(watch_manager, EventProcessor())
event_notifier.loop()
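
The snippet opens mid-list: those strings are presumably the tail of a _methods list on a pyinotify.ProcessEvent subclass, along the lines of this hypothetical, abbreviated sketch:

import pyinotify

class EventProcessor(pyinotify.ProcessEvent):
    _methods = [
        "IN_CREATE",
        "IN_OPEN",
        # ... remaining inotify event names, ending with the
        # "IN_MODIFY" through "default" entries shown above ...
    ]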
Example #5

def dequeue(qentry):
    qentry = os.path.basename(qentry)
    i = qentry.rfind('-')
    a, s = qentry[:i], qentry[i + 1:]
    PIDWrap(10, _dequeue, (
        qentry, )).destination = '../uploads/' + a + '/' + s + '/.autofeedback'


if __name__ == '__main__':
    justme()
    parse_assignments()
    # set up inotify

    wm = pin.WatchManager()
    mask = pin.IN_CLOSE_WRITE | pin.IN_MOVED_TO
    watched = wm.add_watch(home + '/queued', mask, rec=False)
    watched = wm.add_watch(home, mask, rec=False)

    class EventHandler(pin.ProcessEvent):
        '''Given a multiprocessing pool and a list,
        for each file event, requests the pool to handle the file
        and puts the request handler in the list'''
        def process_default(self, event):
            if event.pathname.endswith('assignments.json'):
                parse_assignments(True)
            elif '/queued/' in event.pathname and not event.mask & pin.IN_IGNORED:
                dequeue(event.pathname)
            else:
                pass  # log('ignoring event on', event.pathname)
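
The snippet ends before any notifier is created; a minimal continuation (a sketch, assuming the blocking Notifier loop was intended), using the pin alias from above:

    handler = EventHandler()
    notifier = pin.Notifier(wm, handler)
    notifier.loop()  # blocks, dispatching events to process_default()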
Example #6
 def start_watch(self):
     wm = pyinotify.WatchManager()
     wm.add_watch(self.path, pyinotify.ALL_EVENTS, rec=True)
     eh = MyEventHandler()
     # ThreadedNotifier runs the inotify event loop in a background thread
     notifier = pyinotify.ThreadedNotifier(wm, eh)
     notifier.start()
Example #7
	def enable_inotify(self):
		self._wm = pyinotify.WatchManager()
		self._notifier = pyinotify.Notifier(self._wm, lambda *a, **b: False)
		self._wm.add_watch('/dev/input', pyinotify.IN_CREATE, False)
		self.daemon.get_poller().register(self._notifier._fd,
				self.daemon.get_poller().POLLIN, self._inotify_cb)
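
The _inotify_cb registered above is not shown; a plausible sketch (hypothetical, following the usual pyinotify poll-driven pattern) drains pending events when the inotify descriptor becomes readable:

	def _inotify_cb(self, *a):
		# called by the poller when self._notifier._fd is readable
		self._notifier.read_events()
		self._notifier.process_events()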
Example #8
def watch_delay_call(base_directory,
                     callback,
                     delay=0.5,
                     call_once_initially=True,
                     mask=pyinotify.IN_CREATE | pyinotify.IN_CLOSE_WRITE
                     | pyinotify.IN_MODIFY):
    """Watch all files below a directory and execute a command on changes.
    Add some delay so that multiple save operations trigger a single execution.
    Example:
      def filechanged(paths):
        # TODO: Do something useful.
        print(paths)
      watch_delay_call('.', filechanged)
    Args:
      base_directory: Directory to monitor, recursively.
      callback: Function to call on file change, with a list of paths.
      delay: Time in seconds to delay.
      call_once_initially: Set to true to call the callback once initially.
      mask: File system changes to listen for (by default any file change).
    """
    class Process(pyinotify.ProcessEvent):
        def __init__(self, immediate_callback):
            self.immediate_callback = immediate_callback

        def process_default(self, event):
            target = os.path.join(event.path, event.name)
            self.immediate_callback(target)

    def delay_call(pipe, delayed_callback, delay, call_once_initially):
        if call_once_initially:
            delayed_callback(None)

        path_list = []

        while True:
            # Wait until there is a change.
            path_list.append(pipe.recv())
            while pipe.poll():
                path_list.append(pipe.recv())

            # Delay
            time.sleep(delay)

            # If there are more changes, restart the timer.
            if pipe.poll():
                continue

            # Execute the callback.
            delayed_callback(path_list)

            path_list = []

    receiver, sender = multiprocessing.Pipe(False)

    delay_callback_thread = threading.Thread(target=delay_call,
                                             args=(receiver, callback, delay,
                                                   call_once_initially))
    delay_callback_thread.daemon = True  # dies with this program.
    delay_callback_thread.start()

    while True:
        wm = pyinotify.WatchManager()
        notifier = pyinotify.Notifier(wm, Process(sender.send))
        wm.add_watch(base_directory, mask, rec=True, auto_add=True)
        try:
            while True:
                notifier.process_events()
                if notifier.check_events():
                    notifier.read_events()
        except KeyboardInterrupt:
            notifier.stop()
            break
Example #9
    def _execute(self, options, args):
        """Start the watcher."""

        self.logger = get_logger('auto', self.site.loghandlers)
        LRSocket.logger = self.logger

        if WebSocket is object and pyinotify is None:
            req_missing(['ws4py', 'pyinotify'], 'use the "auto" command')
        elif WebSocket is object:
            req_missing(['ws4py'], 'use the "auto" command')
        elif pyinotify is None:
            req_missing(['pyinotify'], 'use the "auto" command')

        self.cmd_arguments = ['build']
        if self.site.configuration_filename != 'conf.py':
            self.cmd_arguments = [
                '--conf=' + self.site.configuration_filename
            ] + self.cmd_arguments

        # Run an initial build so we are up-to-date
        subprocess.call(["nikola"] + self.cmd_arguments)

        port = options and options.get('port')
        self.snippet = '''<script>document.write('<script src="http://'
            + (location.host || 'localhost').split(':')[0]
            + ':{0}/livereload.js?snipver=1"></'
            + 'script>')</script>
        </head>'''.format(port)

        watched = [
            self.site.configuration_filename,
            'themes/',
            'templates/',
        ]
        for item in self.site.config['post_pages']:
            watched.append(os.path.dirname(item[0]))
        for item in self.site.config['FILES_FOLDERS']:
            watched.append(item)
        for item in self.site.config['GALLERY_FOLDERS']:
            watched.append(item)
        for item in self.site.config['LISTINGS_FOLDERS']:
            watched.append(item)

        out_folder = self.site.config['OUTPUT_FOLDER']
        if options and options.get('browser'):
            browser = True
        else:
            browser = False

        if options['ipv6']:
            dhost = '::'
        else:
            dhost = None

        host = options['address'].strip('[').strip(']') or dhost

        # Start watchers that trigger reloads
        reload_wm = pyinotify.WatchManager()
        reload_notifier = pyinotify.ThreadedNotifier(reload_wm,
                                                     self.do_refresh)
        reload_notifier.start()
        reload_wm.add_watch(out_folder, MASK, rec=True)  # Watch output folders

        # Start watchers that trigger rebuilds
        rebuild_wm = pyinotify.WatchManager()
        rebuild_notifier = pyinotify.ThreadedNotifier(rebuild_wm,
                                                      self.do_rebuild)
        rebuild_notifier.start()
        for p in watched:
            if os.path.exists(p):
                rebuild_wm.add_watch(p, MASK, rec=True)  # Watch input folders

        parent = self

        class Mixed(WebSocketWSGIApplication):
            """A class that supports WS and HTTP protocols in the same port."""
            def __call__(self, environ, start_response):
                if environ.get('HTTP_UPGRADE') is None:
                    return parent.serve_static(environ, start_response)
                return super(Mixed, self).__call__(environ, start_response)

        ws = make_server(host,
                         port,
                         server_class=WSGIServer,
                         handler_class=WebSocketWSGIRequestHandler,
                         app=Mixed(handler_cls=LRSocket))
        ws.initialize_websockets_manager()
        self.logger.info("Serving HTTP on {0} port {1}...".format(host, port))
        if browser:
            if options['ipv6'] or '::' in host:
                server_url = "http://[{0}]:{1}/".format(host, port)
            else:
                server_url = "http://{0}:{1}/".format(host, port)

            self.logger.info(
                "Opening {0} in the default web browser...".format(server_url))
            # Yes, this is racy
            webbrowser.open(server_url)

        try:
            ws.serve_forever()
        except KeyboardInterrupt:
            self.logger.info("Server is shutting down.")
            # This is a hack, but something is locking up in a futex
            # and exit() doesn't work.
            os.kill(os.getpid(), 15)
Example #10
def fiji_grid_fuse(directories,
                   tiles_file,
                   out_file,
                   threshold_reg=0.3,
                   threshold_maxavg=2,
                   threshold_abs=3,
                   do_crop=False):

    source_dir, tiles_full_fname = os.path.split(tiles_file)

    out_file_name = os.path.splitext(out_file)[0]
    out_file_path = "{}.{}".format(out_file_name, img_data_fmt)
    pre_crop_fname = "{}.precrop.{}".format(os.path.basename(out_file_name),
                                            img_data_fmt)
    pre_crop_file = os.path.join(directories['out'], pre_crop_fname)
    base_dir = os.path.dirname(out_file_path)

    fiji_args = fiji_fuse_arg_tpl.format(input_dir_path=source_dir,
                                         tile_file=tiles_full_fname,
                                         threshold_regression=threshold_reg,
                                         threshold_maxavg=threshold_maxavg,
                                         threshold_abs=threshold_abs)
    fiji_write = fiji_write_arg_tpl.format(directories['out'])
    fiji_inst = fiji_grid_stitch_inst_tpl.format(fiji_args + fiji_write)
    command = fiji_command_template.format(fiji_inst)

    # Establish inotify watches to ensure output channels are fully written
    # before proceeding with merging
    channel_files = [
        pth.join(directories['out'], "img_t1_z1_c{}".format(i))
        for i in range(1, 4)
    ]
    wm = pyinotify.WatchManager()
    watches = FilesWaiter(channel_files, wm)

    # Run fusion
    print("Command is:")
    print(command)
    exec_fun = command_executor_factory(directories['log'], pre_crop_fname)
    retcode = exec_fun(command)
    success = (not bool(retcode))
    result_name = "done" if success else "failed"

    log_dir = os.path.join(base_dir, 'log')
    dir_util.copy_tree(directories['log'], log_dir)

    debug_log("Assembling of", pre_crop_file, result_name, retcode)
    matrix, image = [None] * 2
    channels_done = watches.wait()
    if success and channels_done:

        channels = [
            cv2.imread(chan, cv2.IMREAD_GRAYSCALE) for chan in channel_files
        ]
        for chan in channel_files:
            os.remove(chan)
        image_precrop = cv2.merge(channels[::-1])

        if do_crop:
            debug_log("Straigthening and Cropping", pre_crop_file, "into",
                      out_file_path)
            matrix, image = straighten_crop(image_precrop, True)
        else:
            image = image_precrop

        success = cv2_to_file(image, out_file_path)
        result_name = "done" if success else "failed"
        debug_log("Assembly of", out_file_path, result_name)

    return success, image, matrix
Example #11
    config = configparser.ConfigParser()
    config.read(JUPITER_CONFIG_INI_PATH)
    ssh_svc_port, _ = config['PORT_MAPPINGS']['SSH'].split(':')

    task_name = os.environ['MY_TASK_NAME']
    input_q = queue.Queue()
    #input_q = multiprocessing.Queue()

    app_config = app_config_parser.AppConfig(APP_DIR)
    log.info(f"Using base script {app_config.base_script(task_name)}")
    module_name = app_config.base_script(task_name).replace(".py", "")
    # import task base script from the app_specific_files dir
    task_module = importlib.import_module(
        "build.app_specific_files.{}".format(module_name))

    input_wm = pyinotify.WatchManager()
    input_wm.add_watch(CIRCE_INPUT_DIR, pyinotify.ALL_EVENTS)
    log.debug('starting the input monitoring process')
    input_handler = InputFolderHandler(input_q, task_name)
    in_notifier = pyinotify.ThreadedNotifier(input_wm, input_handler)
    in_notifier.start()

    output_wm = pyinotify.WatchManager()
    output_wm.add_watch(CIRCE_OUTPUT_DIR, pyinotify.ALL_EVENTS)
    log.debug('starting the output monitoring process')
    output_handler = OutputFolderHandler(task_name, ssh_svc_port)
    out_notifier = pyinotify.ThreadedNotifier(output_wm, output_handler)
    out_notifier.start()

    t = threading.Thread(target=task_module.task,
                         args=(input_q, CIRCE_INPUT_DIR, CIRCE_OUTPUT_DIR,
Example #12
def main():
    global args
    global debug
    global debugToggled
    global pdbFlagged
    global shuttingDown

    parser = argparse.ArgumentParser(description=scriptName,
                                     add_help=False,
                                     usage='{} <arguments>'.format(scriptName))
    parser.add_argument('-v',
                        '--verbose',
                        dest='debug',
                        help="Verbose output",
                        metavar='true|false',
                        type=str2bool,
                        nargs='?',
                        const=True,
                        default=False,
                        required=False)
    parser.add_argument(
        '--ignore-existing',
        dest='ignoreExisting',
        help="Ignore preexisting files in the monitor directory",
        metavar='true|false',
        type=str2bool,
        nargs='?',
        const=True,
        default=False,
        required=False)
    parser.add_argument(
        '--preserve',
        dest='preserveMode',
        help=f"File preservation mode (default: {PRESERVE_QUARANTINED})",
        metavar=f'[{PRESERVE_QUARANTINED}|{PRESERVE_ALL}|{PRESERVE_NONE}]',
        type=str,
        default=PRESERVE_QUARANTINED,
        required=False)
    parser.add_argument('--min-bytes',
                        dest='minBytes',
                        help="Minimum size for checked files",
                        metavar='<bytes>',
                        type=int,
                        default=MINIMUM_CHECKED_FILE_SIZE_DEFAULT,
                        required=False)
    parser.add_argument('--max-bytes',
                        dest='maxBytes',
                        help="Maximum size for checked files",
                        metavar='<bytes>',
                        type=int,
                        default=MAXIMUM_CHECKED_FILE_SIZE_DEFAULT,
                        required=False)
    parser.add_argument('--malass-host',
                        dest='malassHost',
                        help="Malass host or IP address",
                        metavar='<host>',
                        type=str,
                        required=False)
    parser.add_argument('--malass-port',
                        dest='malassPort',
                        help="Malass web interface port",
                        metavar='<port>',
                        type=int,
                        default=80,
                        required=False)
    parser.add_argument('--malass-limit',
                        dest='malassLimit',
                        help="Malass maximum concurrent scans",
                        metavar='<limit>',
                        type=int,
                        default=MAL_MAX_REQS,
                        required=False)
    parser.add_argument('--vtot-api',
                        dest='vtotApi',
                        help="VirusTotal API key",
                        metavar='<API key>',
                        type=str,
                        required=False)
    parser.add_argument('--vtot-req-limit',
                        dest='vtotReqLimit',
                        help="VirusTotal requests per minute limit",
                        metavar='<requests>',
                        type=int,
                        default=VTOT_MAX_REQS,
                        required=False)
    parser.add_argument(
        '--clamav',
        dest='enableClamAv',
        metavar='true|false',
        help="Enable ClamAV (if VirusTotal and Malass are unavailable)",
        type=str2bool,
        nargs='?',
        const=True,
        default=False,
        required=False)
    parser.add_argument('--start-sleep',
                        dest='startSleepSec',
                        help="Sleep for this many seconds before starting",
                        metavar='<seconds>',
                        type=int,
                        default=0,
                        required=False)
    parser.add_argument('--zeek-log',
                        dest='broSigLogSpec',
                        help="Filespec to write Zeek signature log",
                        metavar='<filespec>',
                        type=str,
                        required=False)
    parser.add_argument(
        '-r',
        '--recursive-directory',
        dest='recursiveDir',
        help=
        "If specified, monitor all directories with this name underneath --directory",
        metavar='<name>',
        type=str,
        required=False)
    requiredNamed = parser.add_argument_group('required arguments')
    requiredNamed.add_argument('-d',
                               '--directory',
                               dest='baseDir',
                               help='Directory to monitor',
                               metavar='<directory>',
                               type=str,
                               required=True)

    try:
        parser.error = parser.exit
        args = parser.parse_args()
    except SystemExit:
        parser.print_help()
        exit(2)

    debug = args.debug
    if debug:
        eprint(os.path.join(scriptPath, scriptName))
        eprint("Arguments: {}".format(sys.argv[1:]))
        eprint("Arguments: {}".format(args))
    else:
        sys.tracebacklimit = 0

    args.preserveMode = args.preserveMode.lower()
    if (len(args.preserveMode) == 0):
        args.preserveMode = PRESERVE_QUARANTINED
    elif (args.preserveMode
          not in [PRESERVE_QUARANTINED, PRESERVE_ALL, PRESERVE_NONE]):
        eprint(f'Invalid file preservation mode "{args.preserveMode}"')
        sys.exit(1)

    # handle sigint and sigterm for graceful shutdown
    signal.signal(signal.SIGINT, shutdown_handler)
    signal.signal(signal.SIGTERM, shutdown_handler)
    signal.signal(signal.SIGUSR1, pdb_handler)
    signal.signal(signal.SIGUSR2, debug_toggle_handler)

    # sleep for a bit if requested
    sleepCount = 0
    while (not shuttingDown) and (sleepCount < args.startSleepSec):
        time.sleep(1)
        sleepCount += 1

    broSigLogSpec = args.broSigLogSpec
    if broSigLogSpec is not None:
        if os.path.isdir(broSigLogSpec):
            # _carved tag will be recognized by 11_zeek_logs.conf in logstash
            broSigLogSpec = os.path.join(broSigLogSpec,
                                         "signatures(_carved).log")
        else:
            # make sure the path for the zeek signatures log file exists before we start writing
            pathlib.Path(os.path.dirname(
                os.path.realpath(broSigLogSpec))).mkdir(parents=True,
                                                        exist_ok=True)

    # add events to watch to EventWatcher class
    for method in EventWatcher._methods:
        event_process_generator(EventWatcher, method)

    if os.path.isdir(args.baseDir):
        preexistingDir = True
    else:
        preexistingDir = False
        if debug: eprint(f'Creating "{args.baseDir}" to monitor')
        pathlib.Path(args.baseDir).mkdir(parents=False, exist_ok=True)

    quarantineDir = os.path.join(args.baseDir, "quarantine")
    preserveDir = os.path.join(args.baseDir, "preserved")
    if (args.preserveMode !=
            PRESERVE_NONE) and (not os.path.isdir(quarantineDir)):
        if debug: eprint(f'Creating "{quarantineDir}" for quarantined files')
        pathlib.Path(quarantineDir).mkdir(parents=False, exist_ok=True)
    if (args.preserveMode
            == PRESERVE_ALL) and (not os.path.isdir(preserveDir)):
        if debug: eprint(f'Creating "{preserveDir}" for other preserved files')
        pathlib.Path(preserveDir).mkdir(parents=False, exist_ok=True)

    watchDirs = []
    while (len(watchDirs) == 0):
        if args.recursiveDir is None:
            watchDirs = [args.baseDir]
        else:
            watchDirs = glob.glob(f'{args.baseDir}/**/{args.recursiveDir}',
                                  recursive=True)

    newFileQueue = deque()
    hashedFileQueue = deque()
    toCheckFileQueue = deque()
    checkingFileQueue = deque()
    finishedFileQueue = deque()
    hashCache = TTLCache(
        maxsize=MAX_HASH_CACHE_SIZE,
        ttl=MAX_HASH_CACHE_TTL)  # only used in the main thread

    if (isinstance(args.malassHost, str) and (len(args.malassHost) > 1)):
        checkConnInfo = MalassScan(args.malassHost,
                                   args.malassPort,
                                   reqLimit=args.malassLimit)
    elif (isinstance(args.vtotApi, str) and (len(args.vtotApi) > 1)
          and (args.vtotReqLimit > 0)):
        checkConnInfo = VirusTotalSearch(args.vtotApi,
                                         reqLimit=args.vtotReqLimit)
    elif args.enableClamAv:
        checkConnInfo = ClamAVScan(debug=debug)
    else:
        checkConnInfo = None

    # begin threaded watch of directory
    time.sleep(1)
    watch_manager = pyinotify.WatchManager()
    event_notifier = pyinotify.ThreadedNotifier(watch_manager,
                                                EventWatcher(newFileQueue))
    for watchDir in watchDirs:
        watch_manager.add_watch(os.path.abspath(watchDir),
                                pyinotify.ALL_EVENTS)
    if debug:
        eprint(f"Monitoring {watchDirs}")
    event_notifier.start()

    # hash files as they are discovered
    fileHashWorkers = ThreadPool(HASH_THREADS, hashFileWorker,
                                 ([newFileQueue, hashedFileQueue], ))
    submitCheckWorkers = ThreadPool(
        SUBMIT_THREADS if not isinstance(checkConnInfo, ClamAVScan) else
        CLAM_MAX_REQS, submitFileWorker,
        ([toCheckFileQueue, checkingFileQueue, checkConnInfo], ))
    resultCheckWorkers = ThreadPool(
        RESULT_THREADS, resultCheckWorker,
        ([checkingFileQueue, finishedFileQueue, checkConnInfo], ))

    # if there are any preexisting files, start with them
    if preexistingDir and (not args.ignoreExisting):
        filesTouched = 0
        for watchDir in watchDirs:
            for preexistingFile in [
                    str(x)
                    for x in pathlib.Path(watchDir).iterdir() if x.is_file()
            ]:
                open(preexistingFile, 'a').close()
                os.utime(preexistingFile, None)
                filesTouched += 1
        if debug and (filesTouched > 0):
            eprint(f"Found {filesTouched} preexisting files to check")

    with open(broSigLogSpec, 'w+', 1) if (
            broSigLogSpec is not None) else nullcontext() as broSigFile:

        # write out header for our super legit zeek signature.log file
        if (broSigFile is not None):
            print('#separator \\x09', file=broSigFile, end='\n')
            print('#set_separator\t,', file=broSigFile, end='\n')
            print('#empty_field\t(empty)', file=broSigFile, end='\n')
            print('#unset_field\t-', file=broSigFile, end='\n')
            print('#path\tsignature', file=broSigFile, end='\n')
            print(f'#open\t{datetime.now().strftime("%Y-%m-%d-%H-%M-%S")}',
                  file=broSigFile,
                  end='\n')
            print(re.sub(
                r"\b((orig|resp)_[hp])\b", r"id.\1",
                f"#fields\t{BroStringFormat}".replace('{',
                                                      '').replace('}', '')),
                  file=broSigFile,
                  end='\n')
            print(f'#types\t{BroSignatureTypes}', file=broSigFile, end='\n')

        debugStats = []
        prevDebugStats = []

        while (not shuttingDown):

            if pdbFlagged:
                pdbFlagged = False
                breakpoint()

            processedEvents = 0

            # processed files for which checking is finished
            while (not shuttingDown) and (processedEvents <
                                          (MAX_PROCESSED_BATCH_SIZE // 2)):

                if pdbFlagged:
                    pdbFlagged = False
                    breakpoint()

                try:
                    fileEvent = finishedFileQueue.popleft()
                except IndexError:
                    break
                else:
                    processedEvents += 1
                    triggered = False
                    debugStr = f"FIN: {fileEvent.event.pathname} is {fileEvent.hash[:8]} ({fileEvent.result})" if debug else ""

                    if (broSigFile is not None):

                        if isinstance(fileEvent.request, AnalyzerScan):
                            scanResult = fileEvent.request.provider.format(
                                fileEvent.result)
                            triggered = (scanResult.hits > 0)

                            if triggered:
                                fileSpecFields = extracted_filespec_to_fields(
                                    fileEvent.event.pathname)
                                broLine = BroSignatureLine(
                                    ts=f"{fileSpecFields.time}",
                                    uid=fileSpecFields.uid
                                    if fileSpecFields.uid is not None else '-',
                                    note=ZEEK_SIGNATURE_NOTICE,
                                    signature_id=scanResult.message,
                                    event_message=scanResult.description,
                                    sub_message=fileSpecFields.fid
                                    if fileSpecFields.fid is not None else
                                    os.path.basename(fileEvent.event.pathname),
                                    signature_count=scanResult.hits,
                                    host_count=scanResult.engines)
                                broLineStr = BroStringFormat.format(
                                    **broLine._asdict())
                                debugStr = broLineStr

                                # write broLineStr event line out to zeek signature.log
                                print(broLineStr, file=broSigFile, end='\n')

                                # save the BroSignatureLine-formatted result in case this hash is seen again later
                                fileEvent.result = broLine

                    if triggered and (args.preserveMode != PRESERVE_NONE):
                        # move triggering file to quarantine
                        try:
                            shutil.move(fileEvent.event.pathname,
                                        quarantineDir)
                        except Exception:
                            # hm move failed, delete it i guess?
                            os.remove(fileEvent.event.pathname)

                    elif (args.preserveMode == PRESERVE_ALL):
                        # move non-triggering file to preserved directory
                        try:
                            shutil.move(fileEvent.event.pathname, preserveDir)
                        except Exception:
                            # hm move failed, delete it i guess?
                            os.remove(fileEvent.event.pathname)

                    else:
                        # delete the file
                        os.remove(fileEvent.event.pathname)

                    if debug: eprint(debugStr)

                    # this file has been checked, update the hash cache with the final result
                    hashCache[fileEvent.hash] = fileEvent

            # process new hashed files to be checked
            queuedDupes = deque()
            while (not shuttingDown) and (processedEvents <
                                          MAX_PROCESSED_BATCH_SIZE):

                if pdbFlagged:
                    pdbFlagged = False
                    breakpoint()

                try:
                    fileEvent = hashedFileQueue.popleft()
                except IndexError:
                    break
                else:
                    processedEvents += 1
                    debugStr = f"POP: {fileEvent.event.pathname} is {fileEvent.hash[:8]} ({fileEvent.result})" if debug else ""

                    if fileEvent.hash in hashCache:
                        triggered = False

                        if hashCache[fileEvent.hash].result is not None:
                            # the file has already been checked all the way through the pipeline and has a result
                            debugStr = f"OLD: {fileEvent.event.pathname} is {fileEvent.hash[:8]} ({fileEvent.result})" if debug else ""

                            triggered = isinstance(
                                hashCache[fileEvent.hash].result,
                                BroSignatureLine)
                            if triggered:

                                # this file triggered a previous signature match, so we don't need to bother processing it again

                                # just update the new fields for the copy of the log
                                fileSpecFields = extracted_filespec_to_fields(
                                    fileEvent.event.pathname)
                                dupResultBroLine = copy.deepcopy(
                                    hashCache[fileEvent.hash].result)
                                dupResultBroLine.ts = f"{fileSpecFields.time}"
                                dupResultBroLine.uid = fileSpecFields.uid if fileSpecFields.uid is not None else '-'
                                dupResultBroLine.sub_message = f"{fileSpecFields.fid if fileSpecFields.fid is not None else os.path.basename(fileEvent.event.pathname)},{hashCache[fileEvent.hash].result.sub_message}"

                                broLineStr = BroStringFormat.format(
                                    **dupResultBroLine._asdict())
                                debugStr = f"{broLineStr}"

                                # write broLineStr event line out to zeek signature.log
                                print(broLineStr, file=broSigFile, end='\n')

                                # don't save the duplicate, since we've already saved the original and reference it in the log
                                os.remove(fileEvent.event.pathname)

                            else:
                                # the file is in the pipeline to be checked, so we don't know the result, but we don't want to check it multiple times...
                                # debugStr = f"AOK: {fileEvent.event.pathname} is {fileEvent.hash[:8]} ({fileEvent.result})" if debug else ""
                                debugStr = ""  # too verbose, even for debug

                                # seen before, but not triggered, so just delete this harmless file
                                os.remove(fileEvent.event.pathname)

                        else:
                            # todo: BUG: if submission failed for everyone, then they're all just sitting in the queue but nobody ever retries

                            # the file is in the pipeline to be checked, so we don't know the result, but we don't want to check it multiple times...
                            # debugStr = f"DUP: {fileEvent.event.pathname} is {fileEvent.hash[:8]} ({fileEvent.result})" if debug else ""
                            debugStr = ""  # too verbose, even for debug

                            if checkConnInfo is not None:
                                # as long as we have some kind of file checker registered (any(checkConnInfo)),
                                # after the loop we will reinsert this into the back end of the queue for checking later
                                queuedDupes.append(fileEvent)

                            else:
                                # no file checker created. don't save the duplicate, since we'd have already saved the original
                                os.remove(fileEvent.event.pathname)

                        if debug and (len(debugStr) > 0): eprint(debugStr)

                    else:
                        # this is a file we have not seen before
                        if debug:
                            eprint(
                                f"NEW: {fileEvent.event.pathname} is {fileEvent.hash[:8]}"
                            )
                        hashCache[fileEvent.hash] = fileEvent
                        toCheckFileQueue.append(fileEvent)

            # put duplicated processing events back into the hashedFileQueue to check again in a bit
            dupeEvents = 0
            while (len(queuedDupes) > 0):

                if pdbFlagged:
                    pdbFlagged = False
                    breakpoint()

                dupeEvents += 1
                hashedFileQueue.append(queuedDupes.popleft())

            # when debugging, print queue depths while there is activity
            if debug:
                debugStats = [
                    len(finishedFileQueue),
                    len(checkingFileQueue),
                    len(toCheckFileQueue),
                    len(hashedFileQueue),
                    len(newFileQueue)
                ]
                if any(x > 0 for x in debugStats) or any(
                        x > 0 for x in prevDebugStats) or debugToggled:
                    eprint(
                        f"\t{debugStats[0]} finished, {debugStats[1]} checking, {debugStats[2]} to check, {debugStats[3]} hashed, {debugStats[4]} new"
                    )
                    debugToggled = False
                prevDebugStats = debugStats

            # if we didn't do anything, sleep for a bit before checking again
            if ((processedEvents - dupeEvents) < MAX_PROCESSED_BATCH_SIZE):
                sleepCount = 0
                while (not shuttingDown) and (sleepCount < 5):
                    time.sleep(1)
                    sleepCount += 1

            # end main event processing while loop

    # graceful shutdown
    if debug:
        eprint("Shutting down...")
    event_notifier.stop()
    if debug:
        eprint(f"Finished monitoring {watchDirs}")
Example #13
 def run(cls, root):
     wm = pyinotify.WatchManager()
     handler = cls(root)
     handler.add_watches(wm)
     pyinotify.Notifier(wm, handler).loop()
Example #14
 def __init__(self, *args, **kwargs):
     super(PyINotifyScanner, self).__init__(*args, **kwargs)
     self.log("Library of choice: pyinotify")
     self._watcher = pyinotify.WatchManager()
     self._notifier = self._generate_notifier()
Example #15
def main():
    Notify.init(APP_NAME)
    urlToHandle = None

    # check parameter
    for arg in sys.argv:
        if arg.startswith(PROTOCOL_SCHEME):
            urlToHandle = arg
    if urlToHandle is None:
        log("[MAIN]  Error: no valid '"+PROTOCOL_SCHEME+"' scheme parameter given.")
        exit(1)

    # create temporary directory
    os.makedirs(DOWNLOAD_DIR, exist_ok=True)

    # parse given companion URL
    log("[HANDLE URL]  "+urlToHandle)
    protocolPayload = unquote(urlToHandle).replace(PROTOCOL_SCHEME, "")
    protocolPayloadData = json.loads(protocolPayload)
    log("[METADATA-LINK]  "+protocolPayloadData["link"])

    # download metadata from provided link
    try:
        metadataString = urllib.request.urlopen(protocolPayloadData["link"]).read()
        metadata = json.loads(metadataString)
    except Exception as e:
        log("[GET METADATA ERROR]  "+str(e))
        exit(1)
    log("[METADATA]  "+str(metadata))

    # start file download
    try:
        filePath = DOWNLOAD_DIR + "/" + metadata["fileName"]
        log("[START DOWNLOAD TO]  "+filePath)
        urllib.request.urlretrieve(metadata["downloadUrl"], filePath)
        log("[DOWNLOAD FINISHED]  "+filePath)
    except Exception as e:
        log("[DOWNLOAD ERROR]  "+str(e))
        exit(1)

    # start application
    log("[LAUNCH]  "+filePath)
    subprocess.call(["xdg-open", filePath])

    # set up file watcher
    log("[SETUP FILE WATCHER]  " + DOWNLOAD_DIR)
    wm = pyinotify.WatchManager()
    wm.add_watch(DOWNLOAD_DIR, pyinotify.IN_MODIFY | pyinotify.IN_CLOSE_WRITE)
    notifier = pyinotify.ThreadedNotifier(wm,
        FileChangedHandler( dict={
            "fileId": metadata["fileId"],
            "fileName": metadata["fileName"],
            "filePath": filePath,
            "mimeType": metadata["mimeType"],
            "downloadUrl": metadata["downloadUrl"],
            "companionActionCallbackUrl": metadata["companionActionCallbackUrl"],
            "uploadUrl": metadata["uploadUrl"],
            "fileMd5": md5(filePath)
        })
    )
    notifier.start()

    # show GUI
    log("[SHOW GUI]")
    app = wx.App()
    window = CompanionWindow(metadata["fileName"])
    window.Show()
    app.MainLoop()

    # kill file watcher after window closed
    log("[EXIT]")
    notifier.stop()
    exit(0)
Example #16
def main():

    while True:
        # Connection establishment to DB
        sql_host = get_config(DEPLOY, 'sql_host').strip()
        sql_user = get_config(DEPLOY, 'sql_user').strip()
        sql_passwd = get_config(DEPLOY, 'sql_passwd').strip()
        sql_db = get_config(DEPLOY, 'sql_db').strip()
    
    #    sql_host_test = get_config(TEST, 'sql_host').strip()
    #    sql_user_test = get_config(TEST, 'sql_user').strip()
    #    sql_passwd_test = get_config(TEST, 'sql_passwd').strip()
    #    sql_db_test = get_config(TEST, 'sql_db').strip()
        
        try:
            conn = psycopg2.connect(database=sql_db, host=sql_host, user=sql_user, password=sql_passwd, port=LOCAL_DB_PORT)
            print('CONNECTED!!')
            break
        except psycopg2.Error:
            print('Unable to connect to database. Retrying...')
            time.sleep(5)
            continue
    cursor = conn.cursor()

    # process existing files
    process_existing(conn, cursor)
    # print('Done with Existing Files!')

    # Pyinotify
    wm = pyinotify.WatchManager()
    mask = pyinotify.IN_CREATE

    #### Start of class ####
    class EventHandler(pyinotify.ProcessEvent):
        last_checktime = 0
        def __init__(self):
            EventHandler.last_checktime = time.time()

        def process_IN_CREATE(self,event):
            # print('Created:', event.pathname)
            self.conn = conn
            self.cursor = cursor
            result = 0

            # Get household id from the file name (<hid>-<time>-<seq>).
            hid_str = event.pathname.split('/')[-1]
            hid_str = hid_str.split('-')[0]

            # Process only files. Ignore directory creation for now.
            if os.path.isdir(event.pathname):
                print('New router added.')
            else:
                result = process_file(event.pathname, self.conn, self.cursor, hid_str)

            # Error with connection
            if result == -1:
                self.conn = reconnect_to_database()
                self.cursor = self.conn.cursor()

            # Sweep through directories again in 10 seconds.
            nowtime = time.time()
            if (nowtime - EventHandler.last_checktime) > 10.0:
                print("Start processing existing stuff again")
                result = process_existing(self.conn, self.cursor)

                # Error with connection
                if result < 0:
                    self.conn = reconnect_to_database()
                    self.cursor = self.conn.cursor()
                elif result == 0:
                    EventHandler.last_checktime = nowtime

    #### End of class ####

    handler = EventHandler()
    notifier = pyinotify.Notifier(wm, handler)
    wdd = wm.add_watch(CHECK_DIR, mask, rec=True)

    # Start
    notifier.loop()
Example #17
 def watch_dir(self):
     mask = pyinotify.IN_CLOSE_WRITE
     wm = pyinotify.WatchManager()
     notifier = pyinotify.Notifier(wm, self.handler)
     wm.add_watch(self.path, mask)
     notifier.loop()
Example #18
class Discoverer(threading.Thread):
    run = None

    def __init__(self):
        super(Discoverer, self).__init__()
        self.paths = {}

    def watch(self, path, handler):
        if self.run is None:  # pragma: nocover
            raise ImportError(
                "Must have either ``MacFSEvents`` (on Mac OS X) or "
                "``pyinotify`` (Linux) to enable runtime discovery.")

        self.paths[path] = handler

        # thread starts itself
        if not self.is_alive():
            self.daemon = True
            self.start()

    try:
        import fsevents
    except ImportError:  # pragma: nocover
        pass
    else:

        def run(self):  # pragma: nocover
            logger.info("Starting FS event listener.")

            def callback(subpath, subdir):
                for path, handler in self.paths.items():
                    path = string(path)
                    if subpath.startswith(path):
                        config = handler.configure()
                        config.commit()

            stream = self.fsevents.Stream(callback,
                                          *(string(x) for x in self.paths))
            observer = self.fsevents.Observer()
            observer.schedule(stream)
            observer.run()
            observer.unschedule(stream)
            observer.stop()
            observer.join()

    try:
        import pyinotify
    except ImportError:  # pragma: nocover
        pass
    else:
        wm = pyinotify.WatchManager()

        def run(self):  # pragma: nocover
            self.watches = []
            mask = self.pyinotify.IN_CREATE

            for path in self.paths:
                wdd = self.wm.add_watch(path, mask, rec=True)
                self.watches.append(wdd)

            class Event(self.pyinotify.ProcessEvent):
                def process_IN_CREATE(inst, event):
                    subpath = event.path
                    for path, handler in self.paths.items():
                        if subpath.startswith(path):
                            config = handler.configure()
                            config.commit()

            handler = Event()
            notifier = self.pyinotify.Notifier(self.wm, handler)
            notifier.loop()
            for wdd in self.watches:
                self.wm.rm_watch(list(wdd.values()))
            notifier.stop()
            notifier.join()
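
A usage sketch; my_handler is hypothetical and only needs the configure()/commit() pair invoked by both run() variants above:

discoverer = Discoverer()
discoverer.watch('/srv/app/resources', my_handler)  # daemon thread starts on first watch()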
Example #19
    LOGGER.addHandler(fh)

    LOGGER = logging.getLogger('move_it_server')
    LOGGER.setLevel(logging.DEBUG)

    pyinotify.log.handlers = [fh]

    LOGGER.info("Starting up.")

    LOGGER.info("Starting publisher on port %s.", str(cmd_args.port))

    PUB = Publisher("tcp://*:" + str(cmd_args.port), "move_it_server")

    mask = (pyinotify.IN_CLOSE_WRITE | pyinotify.IN_MOVED_TO
            | pyinotify.IN_CREATE)
    watchman = pyinotify.WatchManager()

    def local_reload_config(filename):
        return reload_config(filename, chains, create_listener_notifier,
                             MirrorRequestManager, PUB)

    notifier = pyinotify.ThreadedNotifier(
        watchman,
        EventHandler(local_reload_config, cmd_filename=cmd_args.config_file))
    watchman.add_watch(os.path.dirname(cmd_args.config_file), mask)

    def chains_stop(*args):
        global running
        running = False
        notifier.stop()
        terminate(chains, PUB)
Example #20
    def handle(self, *args, **options):
        from django.conf import settings
        import os.path

        # Verify INOTIFIER_WATCH_PATHS is defined and non-empty
        try:
            assert settings.INOTIFIER_WATCH_PATHS
        except (AttributeError, AssertionError):
            raise CommandError('Missing/empty setting INOTIFIER_WATCH_PATHS')

        # Verify INOTIFIER_WATCH_PATHS is properly formatted
        try:
            len_3 = [len(tup) == 3 for tup in settings.INOTIFIER_WATCH_PATHS]
            assert all(len_3)
        except AssertionError:
            msg = ('setting INOTIFIER_WATCH_PATHS should be an iterable of '
                   '3-tuples of the form '
                   '[ ("/path1/", <pyinotify event mask>, <processor cls>), ]')
            raise CommandError(msg)

        # Verify monitor_paths exists and processor classes can be imported
        for mon_path, m, processor_cls in settings.INOTIFIER_WATCH_PATHS:
            if not os.path.exists(mon_path):
                err = "%s does not exist or you have insufficient permission" \
                      % mon_path
                raise CommandError(err)
            path = '.'.join(processor_cls.split('.')[0:-1])
            cls = processor_cls.split('.')[-1]
            try:
                mod = __import__(path, globals(), locals(), [cls], 0)
                getattr(mod, cls)
            except ImportError as e:
                err = 'Cannot import event processor module: %s\n\n%s' \
                      % (path, e)
                raise CommandError(err)
            except AttributeError:
                raise CommandError("%s does not exist in %s" % (cls, path))

        # Verify pyinotify is installed
        try:
            import pyinotify
        except ImportError as e:
            raise CommandError("Cannot import pyinotify: %s" % e)

        # Setup watches using pyinotify
        wm = pyinotify.WatchManager()
        for path, mask, processor_cls in settings.INOTIFIER_WATCH_PATHS:
            cls_path = '.'.join(processor_cls.split('.')[0:-1])
            cls = processor_cls.split('.')[-1]

            mod = __import__(cls_path, globals(), locals(), [cls], 0)
            Processor = getattr(mod, cls)

            wm.add_watch(path, mask, proc_fun=Processor())
            print "Adding watch on %s, processed by %s" % (path, processor_cls)

        notifier = pyinotify.Notifier(wm)

        # Setup pid file location. Try to use PROJECT_PATH but default to /tmp
        try:
            pid_file = os.path.join(settings.PROJECT_PATH, 'inotifier.pid')
        except AttributeError:
            pid_file = os.path.join("/tmp", "inotifier.pid")

        # Daemonize, killing any existing process specified in pid file
        daemon_kwargs = {}
        try:
            daemon_kwargs['stdout'] = settings.INOTIFIER_DAEMON_STDOUT
        except AttributeError:
            pass

        try:
            daemon_kwargs['stderr'] = settings.INOTIFIER_DAEMON_STDERR
        except AttributeError:
            pass

        notifier.loop(daemonize=True,
                      pid_file=pid_file,
                      force_kill=True,
                      **daemon_kwargs)

        print "File monitoring started"
Example #21
 def run(self):
     # Watch the config file for changes.
     wm = pyinotify.WatchManager()
     wm.add_watch('config.ini', pyinotify.IN_MODIFY, onChange)
     notifier = pyinotify.Notifier(wm)
     notifier.loop()
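
The onChange callback passed to add_watch() as proc_fun is not shown; a minimal hypothetical version:

def onChange(event):
    # invoked for each IN_MODIFY event on config.ini
    print('config changed:', event.pathname)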
Example #22
def write_pdf(input_path: Path,
              output_dir: Path,
              print_script: str = 'window.print();'):
    """Writes a PDF to the same directory as the HTML document at input_path"""
    input_url = f'file://{input_path.resolve().as_posix()}'
    if output_dir:
        output_path = output_dir.resolve() / input_path.name
    else:
        output_path = input_path.resolve()
    output_path = output_path.with_suffix('.pdf')

    chrome_options = webdriver.chrome.options.Options()
    # Enable silent printing
    chrome_options.add_argument('--kiosk-printing')
    prefs = {
        # Set printing settings
        'printing.print_preview_sticky_settings.appState':
        _PRINTING_APPSTATE.read_text(),
        # Printing directory is the same as file saving directory
        'savefile.default_directory':
        str(output_path.parent),
        'savefile.type':
        0,
    }
    # add_experimental_option() essentially sets a property on the underlying
    # chromeOptions object.
    # See https://chromedriver.chromium.org/capabilities under "chromeOptions object"
    # for all valid properties
    chrome_options.add_experimental_option('prefs', prefs)
    #local_state = dict()
    #chrome_options.add_experimental_option('localState', local_state)
    driver = webdriver.Chrome(options=chrome_options)
    try:
        driver.get(input_url)

        # Remove old file
        if output_path.exists():
            output_path.unlink()

        # Setup inotify for printing
        watch_manager = pyinotify.WatchManager()

        class PrintingEventHandler(pyinotify.ProcessEvent):
            '''Exit when the output file has been written'''
            def process_IN_CLOSE(self, event):
                if event.name:
                    if event.name == output_path.name:
                        # Hack to abort loop cleanly
                        raise KeyboardInterrupt()
                    else:
                        print('DEBUG: Closed file:', event.name)

        notifier = pyinotify.Notifier(watch_manager, PrintingEventHandler())

        # Begin watching for file changes
        watch_manager.add_watch(
            str(output_path.parent),
            mask=pyinotify.IN_CLOSE_WRITE | pyinotify.IN_CLOSE_NOWRITE,
            quiet=False,
        )

        # Print file. This is non-blocking
        driver.execute_script(print_script)

        # Block until the output file is created and written
        # This will automatically cleanup inotify once it exits
        notifier.loop()
    finally:
        # Cleanup chromedriver
        driver.quit()
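A usage sketch for write_pdf, assuming chromedriver is on PATH and the snippet's module-level imports (pathlib.Path, selenium.webdriver, pyinotify) plus the _PRINTING_APPSTATE constant are in place:

from pathlib import Path

# Render ./report.html into ./pdfs/report.pdf
write_pdf(Path('report.html'), Path('pdfs'))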
Example #23
def activity_watcher(active_path='/home/storage/active', ignore_dot=True):
    """
    Watch the /projects directory, and when any file or path changes,
    touch the file active_path/project_id.

    If ignore_dot is true (the default), do not trigger changes when a path
    that begins ~/.somepath changes.

    NOTES:

     - while this is running, projects often can't be unmounted

     - this function *must* be run as root, since otherwise there is no way to use inotify to watch for changes on subdirs.

    """
    import pyinotify
    wm   = pyinotify.WatchManager()
    mask = pyinotify.IN_CREATE | pyinotify.IN_MOVED_TO | pyinotify.IN_MODIFY | pyinotify.IN_CLOSE_WRITE | pyinotify.IN_DELETE

    last_add = {}
    def add(pathname):
        if len(pathname) < 47:
            return
        v = pathname.split('/')
        project_id = v[2]
        # avoid excessive filesystem touching by ignoring requests for 15 seconds after activity.
        t = time.time()
        if last_add.get(project_id, 0) >= t - 15:
            return
        last_add[project_id] = t
        log.debug("activity: %s", pathname)
        active = os.path.join(active_path, v[2])
        cmd("mkdir -p '%s'; touch '%s'; chown -R storage. '%s'"%(active_path, active, active_path))

    class EventHandler(pyinotify.ProcessEvent):
        def process_IN_CREATE(self, event):
            add(event.pathname)
        def process_IN_DELETE(self, event):
            add(event.pathname)
        def process_IN_MOVED_TO(self, event):
            add(event.pathname)
        def process_IN_MODIFY(self, event):
            add(event.pathname)
        def process_IN_CLOSE_WRITE(self, event):
            add(event.pathname)

    handler = EventHandler()

    # dispatch inotify events to the handler as they arrive
    notifier = pyinotify.Notifier(wm, handler)
    watchers = []
    log.info("adding inotify watcher to /projects...")
    if ignore_dot:
        def exclude_filter(s):
            return s[47:].startswith('.')
    else:
        def exclude_filter(s):
            return False

    watchers.append(wm.add_watch('/projects', mask, rec=True, auto_add=True, exclude_filter=exclude_filter))
    log.info("done: now watching for changes")
    notifier.loop()
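pyinotify's ExcludeFilter builds the same kind of predicate from regular expressions; the hand-rolled dot-path filter above could be expressed as follows (the pattern assumes the /projects/<project_id>/ layout implied by the offset 47):

import pyinotify

# Skip any hidden entry directly under a project's home directory
exclude = pyinotify.ExcludeFilter([r'^/projects/[^/]+/\..*'])
wm.add_watch('/projects', mask, rec=True, auto_add=True,
             exclude_filter=exclude)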
Example #24
def main():
    global logging
    logging.basicConfig(level=logging.DEBUG)

    global dag_info
    path1 = 'centralized_scheduler/dag.txt'
    path2 = 'centralized_scheduler/nodes.txt'
    dag_info = read_config(path1, path2)

    global username, password, ssh_port, num_retries, MONGO_DOCKER, MONGO_SVC, FLASK_SVC, FLASK_DOCKER, task_queue_size
    # Load all the configuration
    INI_PATH = '/jupiter_config.ini'
    config = configparser.ConfigParser()
    config.read(INI_PATH)
    username = config['AUTH']['USERNAME']
    password = config['AUTH']['PASSWORD']
    ssh_port = int(config['PORT']['SSH_SVC'])
    num_retries = int(config['OTHER']['SSH_RETRY_NUM'])
    task_queue_size = int(config['OTHER']['TASK_QUEUE_SIZE'])
    MONGO_SVC = int(config['PORT']['MONGO_SVC'])
    MONGO_DOCKER = int(config['PORT']['MONGO_DOCKER'])
    FLASK_SVC = int(config['PORT']['FLASK_SVC'])
    FLASK_DOCKER = int(config['PORT']['FLASK_DOCKER'])

    global BOKEH_SERVER, BOKEH_PORT, BOKEH
    BOKEH_SERVER = config['BOKEH_LIST']['BOKEH_SERVER']
    BOKEH_PORT = int(config['BOKEH_LIST']['BOKEH_PORT'])
    BOKEH = int(config['BOKEH_LIST']['BOKEH'])

    update_interval = 1

    prepare_global_info()

    # Prepare transfer-runtime file:
    global runtime_sender_log, RUNTIME, TRANSFER, transfer_type
    RUNTIME = int(config['CONFIG']['RUNTIME'])
    TRANSFER = int(config['CONFIG']['TRANSFER'])

    if TRANSFER == 0:
        transfer_type = 'scp'

    runtime_sender_log = open(
        os.path.join(os.path.dirname(__file__), 'runtime_transfer_sender.txt'),
        "w")
    s = "{:<10} {:<10} {:<10} {:<10} \n".format('Node_name', 'Transfer_Type',
                                                'File_Path', 'Time_stamp')
    runtime_sender_log.write(s)
    runtime_sender_log.close()
    runtime_sender_log = open(
        os.path.join(os.path.dirname(__file__), 'runtime_transfer_sender.txt'),
        "a")
    #Node_name, Transfer_Type, Source_path , Time_stamp

    if RUNTIME == 1:
        global runtime_receiver_log
        runtime_receiver_log = open(
            os.path.join(os.path.dirname(__file__),
                         'runtime_transfer_receiver.txt'), "w")
        s = "{:<10} {:<10} {:<10} {:<10} \n".format('Node_name',
                                                    'Transfer_Type',
                                                    'File_path', 'Time_stamp')
        runtime_receiver_log.write(s)
        runtime_receiver_log.close()
        runtime_receiver_log = open(
            os.path.join(os.path.dirname(__file__),
                         'runtime_transfer_receiver.txt'), "a")
        #Node_name, Transfer_Type, Source_path , Time_stamp

    web_server = MonitorRecv()
    web_server.start()

    # Update execution information file
    _thread.start_new_thread(update_exec_profile_file, ())

    _thread.start_new_thread(schedule_update_price, (update_interval, ))
    # Update pricing information every interval

    # watch manager
    wm = pyinotify.WatchManager()
    input_folder = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                'input/')
    wm.add_watch(input_folder, pyinotify.ALL_EVENTS, rec=True)
    logging.debug('starting the input monitoring process\n')
    eh = Handler()
    notifier = pyinotify.ThreadedNotifier(wm, eh)
    notifier.start()

    output_folder = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                 'output/')
    wm1 = pyinotify.WatchManager()
    wm1.add_watch(output_folder, pyinotify.ALL_EVENTS, rec=True)
    logging.debug('starting the output monitoring process\n')
    eh1 = Handler1()
    notifier1 = pyinotify.Notifier(wm1, eh1)
    notifier1.loop()
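Handler and Handler1 are ProcessEvent subclasses defined elsewhere in this scheduler; any subclass with process_* methods fits the two notifiers above. A minimal sketch of what the input handler might look like:

class Handler(pyinotify.ProcessEvent):
    def process_IN_CLOSE_WRITE(self, event):
        # A file in input/ has been fully written; dispatch it to the pipeline here.
        logging.debug('new input file: %s', event.pathname)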
Example #25
def inotify_watch(app, args):

    import pyinotify

    parent_paths_to_ignore = []

    def must_be_saved(path):

        if os.path.isdir(path):
            return False

        base = os.path.basename(path)
        for pattern in app.conf['ignore.filename']:
            if fnmatch.fnmatch(base, pattern):
                #lg.debug("ignoring (base) %s", path)
                return False

        thisdir = os.path.dirname(path).rstrip('/')

        while True:
            thisbase = os.path.basename(thisdir)
            if thisdir in parent_paths_to_ignore:
                #lg.debug("Ignoring path cache: %s", path)
                return False

            if thisbase in app.conf['ignore.inpath']:
                parent_paths_to_ignore.append(thisdir)
                return False

            for pp in app.conf['ignore.parentpattern']:
                pptest = os.path.join(thisdir, pp)
                if os.path.exists(pptest):
                    #lg.debug("Ignoring: %s because of %s", path, pptest)
                    parent_paths_to_ignore.append(thisdir)
                    return False

            thisdir = os.path.dirname(thisdir).rstrip('/')
            if not thisdir:
                break

        return True

    class MadEventHandler(pyinotify.ProcessEvent):
        def process_IN_DELETE(self, event):
            lg.info("Delete: %s", event.pathname)
            maf = mad2.util.get_mad_file(app, event.pathname)
            maf.delete()
            maf.flush()

        def process_IN_MOVED_FROM(self, event):
            return self.process_IN_DELETE(event)

        def process_default(self, event):

            pn = event.pathname

            try:
                if os.path.exists(pn) and \
                   os.path.getsize(pn) < args.minsize:
                    return

                if not must_be_saved(pn):
                    return

            except Exception as e:
                lg.error("problem processing %s", pn)
                lg.error(" - %s", str(e))
                return

            try:
                lg.debug("saving: %s", event.pathname)
                maf = mad2.util.get_mad_file(app, event.pathname)
                maf.save()
                maf.flush()
            except Exception as e:
                lg.error("problem mad saving %s", pn)
                lg.error(" - %s", str(e))


    mask = pyinotify.IN_DELETE \
        | pyinotify.IN_MOVED_FROM \
        | pyinotify.IN_CREATE \
        | pyinotify.IN_MODIFY \
        | pyinotify.IN_ATTRIB \
        | pyinotify.IN_MOVED_TO

    wm = pyinotify.WatchManager()

    handler = MadEventHandler()
    notifier = pyinotify.Notifier(wm, handler)
    wm.add_watch(args.path, mask, rec=True)
    notifier.loop()
Example #26
 def __init__(self, inbox_q, cluster, **kwargs):
     super(PublicConfigFileWatcher, self).__init__(inbox_q, cluster)
     self.wm = pyinotify.WatchManager()
     self.wm.add_watch(PATH_TO_SYSTEM_PAASTA_CONFIG_DIR, self.mask, rec=True)
     self.notifier = pyinotify.Notifier(watch_manager=self.wm,
                                        default_proc_fun=PublicConfigEventHandler(filewatcher=self))
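self.mask comes from the watcher base class and is not shown here; for a config-directory watcher, a plausible value (an assumption, not necessarily paasta's actual mask) would be:

mask = (pyinotify.IN_CREATE | pyinotify.IN_CLOSE_WRITE |
        pyinotify.IN_MOVED_TO | pyinotify.IN_DELETE)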
Example #27
def watcher(config):
    # for background processes polling
    processes = {}

    wdds      = dict()
    notifiers = dict()

    # read jobs from config file
    for section in config.sections():
        # mandatory opts
        mask      = parseMask(config.get(section, 'events').split(','))
        folder    = config.get(section, 'watch')
        command   = config.get(section, 'command')
        # optional opts (i.e. with default values)
        recursive = config.getboolean(section, 'recursive')
        autoadd   = config.getboolean(section, 'autoadd')
        excluded  = None if not config.get(section, 'excluded') else set(config.get(section, 'excluded').split(','))
        include_extensions = None if not config.get(section, 'include_extensions') else set(config.get(section, 'include_extensions').split(','))
        exclude_extensions = None if not config.get(section, 'exclude_extensions') else set(config.get(section, 'exclude_extensions').split(','))
        exclude_re = None if not config.get(section, 'exclude_re') else re.compile(config.get(section, 'exclude_re'))
        background = config.getboolean(section, 'background')
        log_output = config.getboolean(section, 'log_output')

        outfile = config.get(section, 'outfile')
        if outfile:
            t = string.Template(outfile)
            outfile = t.substitute(job=section)
            if log_output:
               logging.debug("logging '%s' output to '%s'", section, outfile)
        elif log_output:
            logging.debug("logging '%s' output to daemon log", section)

        action_on_success = config.get(section, 'action_on_success')
        action_on_failure = config.get(section, 'action_on_failure')

        logging.info("%s: watching '%s'", section, folder)

        wm = pyinotify.WatchManager()
        handler = EventHandler(processes,
                               job = section,
                               mask = mask,
                               folder = folder,
                               command = command,
                               log_output = log_output,
                               include_extensions = include_extensions,
                               exclude_extensions = exclude_extensions,
                               exclude_re = exclude_re,
                               background = background,
                               action_on_success = action_on_success,
                               action_on_failure = action_on_failure,
                               outfile = outfile
                              )

        wdds[section] = wm.add_watch(folder, mask, rec=recursive, auto_add=autoadd)
        # Remove watches for excluded directories.
        if excluded:
            for excluded_dir in excluded:
                for (k, v) in list(wdds[section].items()):
                    try:
                        if k.startswith(excluded_dir):
                            wm.rm_watch(v)
                            wdds[section].pop(k)
                    except UnicodeDecodeError:
                        logging.exception("Failed to check exclude for %r (decoding error)", k)
                logging.debug("Excluded dirs : %s", excluded_dir)
        # Create a ThreadedNotifier so that each job has its own thread
        notifiers[section] = pyinotify.ThreadedNotifier(wm, handler)
        notifiers[section].setName(section)

    # Start all the notifiers.
    for (name, notifier) in notifiers.items():
        try:
            notifier.start()
            logging.debug('Notifier for %s is instantiated', name)
        except pyinotify.NotifierError as err:
            logging.warning('%r %r', sys.stderr, err)

    # Wait for SIGTERM
    try:
        while 1:
            try:
                # build a new list since we mutate the dict on the fly
                for process in list(processes):
                    if process.poll() is not None:
                        stdoutdata = get_stdout_log(processes[process]['logHandler'])
                        process_report(process, processes[process]['opts'], stdoutdata)
                        del processes[process]
            except Exception as err:
                logging.exception("Failed to collect children:")
            time.sleep(0.1)
    except:
        cleanup_notifiers(notifiers)
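parseMask is defined elsewhere in this daemon; since every inotify flag is a module-level constant on pyinotify, a faithful sketch simply ORs the configured names together:

def parseMask(names):
    """Turn ['IN_CLOSE_WRITE', 'IN_DELETE', ...] into a pyinotify event mask."""
    mask = 0
    for name in names:
        mask |= getattr(pyinotify, name.strip())
    return mask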
Example #28
 def __init__(self, inbox_q, cluster, **kwargs):
     super(SoaFileWatcher, self).__init__(inbox_q, cluster)
     self.wm = pyinotify.WatchManager()
     self.wm.add_watch(DEFAULT_SOA_DIR, self.mask, rec=True)
     self.notifier = pyinotify.Notifier(watch_manager=self.wm,
                                        default_proc_fun=YelpSoaEventHandler(filewatcher=self))
Example #29
import functools
import sys

import pyinotify


def on_loop(notifier, counter):
    """
    Dummy function called after each event loop; it only ensures
    the child process eventually exits (after 5 iterations).
    """
    if counter.count > 4:
        # Loops 5 times then exits.
        sys.stdout.write("Exit\n")
        notifier.stop()
        sys.exit(0)
    else:
        sys.stdout.write("Loop %d\n" % counter.count)
        counter.plusone()


wm = pyinotify.WatchManager()
notifier = pyinotify.Notifier(wm)

# watched events
mask = pyinotify.IN_DELETE | pyinotify.IN_CLOSE_WRITE

# get this watch list from the configuration
wm.add_watch('/media/PIHU_DATA', mask, rec=True)
#wm.add_watch('/media/PIHU_DATA2', pyinotify.ALL_EVENTS)
wm.add_watch('/media/PIHU_SMB', mask,
             rec=True)  # will probably not work for SMB mounts..

on_loop_func = functools.partial(on_loop, counter=Counter())

# Notifier instance spawns a new process when daemonize is set to True. This
# child process' PID is written to /tmp/pyinotify.pid so a later run can
# terminate it; loop() blocks in the child until on_loop stops the notifier.
notifier.loop(daemonize=True, callback=on_loop_func,
              pid_file='/tmp/pyinotify.pid')
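The Counter helper is not shown; a minimal version consistent with its use above (a count attribute plus a plusone() method):

class Counter(object):
    """Simple counter driving the five-iteration exit in on_loop."""
    def __init__(self):
        self.count = 0

    def plusone(self):
        self.count += 1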
Example #30
 def __init__(self, watchdirs, handler, mask):
     super(WatchInfo, self).__init__()
     self.watchdirs = watchdirs
     self.wm = pyinotify.WatchManager()  # Watch Manager
     self.notifier = pyinotify.Notifier(self.wm, handler)
     self.wdd = self.wm.add_watch(watchdirs, mask, rec=True, auto_add=True)
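A usage sketch for this wrapper, assuming a ProcessEvent subclass like the handlers above (MyHandler and the watched path are hypothetical):

watch = WatchInfo(['/tmp/watched'], MyHandler(),
                  pyinotify.IN_CREATE | pyinotify.IN_DELETE)
watch.notifier.loop()  # blocks, dispatching events to MyHandler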