Example #1
 def remove_if_stale(self, cutoff):
     '''
     Given a cutoff in seconds, check whether the file is older than that
     many seconds and remove it if so.  We do this by opening the file
     exclusively first, stat()ing the open file handle, then removing the
     path if it checks out.  Return True on removal, False for anything
     else, including errors.
     '''
     removed = False
     try:
         # we're not going to write anything but have to open for write
         # in order to get LOCK_EX
         fhandle = open(self.lockfile.get_path(), "a+")
         # try to get the lock. if we can't then we give up
         try:
             fcntl.lockf(fhandle, fcntl.LOCK_EX | fcntl.LOCK_NB)
         except Exception:
             # fail to get lock or some other error
             fhandle.close()
             return removed
         if self._is_stale(cutoff, fhandle.fileno()):
             removed = self._unlock()
         else:
             # if the file did not exist, our open call would have created
             # it, and then we would have an empty file.  No one else would
             # have written to it because we have the LOCK_EX here.
             # See if that's the case and if so, clean up
             filesize = os.fstat(fhandle.fileno()).st_size
             if not filesize:
                 removed = self._unlock()
         # lock removed now
         fhandle.close()
         return removed
     except Exception:
         pass
     return False
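_is_stale and _unlock belong to the surrounding class and are not shown. Going by the docstring, staleness is judged by stat()ing the open handle; a hypothetical _is_stale could be as small as the sketch below. The name and signature come from the call above, while using st_mtime as the age field is an assumption.

import os
import time

def _is_stale(self, cutoff, fd):
    """True if the file behind fd was last modified more than cutoff seconds ago."""
    # hypothetical helper: fstat the open descriptor, compare mtime age to cutoff
    return time.time() - os.fstat(fd).st_mtime > cutoff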
Example #2
    def __init__(self, flavor_id=""):
        import sys
        self.initialized = False
        basename = os.path.splitext(os.path.abspath(sys.argv[0]))[0].replace("/", "-").replace(":", "").replace("\\", "-") + '-%s' % flavor_id + '.lock'
        # os.path.splitext(os.path.abspath(sys.modules['__main__'].__file__))[0].replace("/", "-").replace(":", "").replace("\\", "-") + '-%s' % flavor_id + '.lock'
        self.lockfile = os.path.normpath(tempfile.gettempdir() + '/' + basename)

        logger.debug("SingleInstance lockfile: " + self.lockfile)
        if sys.platform == 'win32':
            try:
                # file already exists, we try to remove (in case previous execution was interrupted)
                if os.path.exists(self.lockfile):
                    os.unlink(self.lockfile)
                self.fd = os.open(self.lockfile, os.O_CREAT | os.O_EXCL | os.O_RDWR)
            except OSError:
                type, e, tb = sys.exc_info()
                if e.errno == 13:
                    logger.error("Another instance is already running, quitting.")
                    sys.exit(-1)
                print(e.errno)
                raise
        else:  # non Windows
            import fcntl
            self.fp = open(self.lockfile, 'w')
            try:
                fcntl.lockf(self.fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
            except IOError:
                logger.warning("Another instance is already running, quitting.")
                sys.exit(-1)
        self.initialized = True
Example #3
		def aleradyrunning(self):
			try:
				fcntl.lockf(self.fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
			except IOError:
				return True
			else:
				return False
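The probe above only reports status and assumes self.fp is an already-open handle on the lock file. A self-contained sketch of the same non-blocking check, with a placeholder lock path:

import fcntl

def already_running(lock_path="/tmp/app.lock"):
    """Return True if another process holds an exclusive lock on lock_path."""
    fp = open(lock_path, "a")  # "a" creates the file if needed without truncating
    try:
        fcntl.lockf(fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError:
        fp.close()
        return True  # non-blocking acquire failed: another process has it
    fcntl.lockf(fp, fcntl.LOCK_UN)
    fp.close()
    return False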
Example #4
def processFile(filename):
  """Process and upload a file.
  Return True if the file has been processed as completely as it will ever be and can be deleted"""

  with open(filename, 'r+') as f:
    try:
      # make sure we're alone on that file
      fcntl.lockf(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError:
      return False

    try:
      result = json.load(f)
      st = os.stat(filename)
      result['timestamp'] = st.st_ctime
      trimDict(result)
      return sendException(result, filename)
    except ValueError, ex:
      print >> sys.stderr, "Could not read %s:" % filename
      print >> sys.stderr, '\n"""'
      f.seek(0)
      print >> sys.stderr, f.read()
      print >> sys.stderr, '"""\n'
      print >> sys.stderr, str(ex)
      return True # so this bogus file gets deleted
    finally:
      # always release the lock before the file is closed
      fcntl.lockf(f, fcntl.LOCK_UN)
Example #5
def pycbc_compile_function(code,arg_names,local_dict,global_dict,
                     module_dir,
                     compiler='',
                     verbose=1,
                     support_code=None,
                     headers=None,
                     customize=None,
                     type_converters=None,
                     auto_downcast=1,
                     **kw):
    """ Dummy wrapper around scipy weave compile to implement file locking
    """
    from scipy.weave.inline_tools import _compile_function
    headers = [] if headers is None else headers
    lockfile_dir = os.path.dirname(module_dir)
    lockfile_name = os.path.join(lockfile_dir, 'code_lockfile')
    print("attempting to aquire lock '%s' for compiling code" % lockfile_name)
    if not os.path.exists(lockfile_dir):
        os.makedirs(lockfile_dir)
    lockfile = open(lockfile_name, 'w')
    fcntl.lockf(lockfile, fcntl.LOCK_EX)
    print ("we have aquired the lock")
    
    func = _compile_function(code,arg_names, local_dict, global_dict,
                     module_dir, compiler, verbose,
                     support_code, headers, customize,
                     type_converters,
                     auto_downcast, **kw)

    fcntl.lockf(lockfile, fcntl.LOCK_UN)
    print ("the lock has been released")

    return func
Example #6
    def close(self):
        ''' terminate the connection '''
        cache_key = self._cache_key()
        SSH_CONNECTION_CACHE.pop(cache_key, None)
        SFTP_CONNECTION_CACHE.pop(cache_key, None)
        if self.sftp is not None:
            self.sftp.close()

        if C.PARAMIKO_RECORD_HOST_KEYS and self._any_keys_added():

            # add any new SSH host keys -- warning -- this could be slow
            lockfile = self.keyfile.replace("known_hosts",".known_hosts.lock") 
            dirname = os.path.dirname(self.keyfile)
            if not os.path.exists(dirname):
                os.makedirs(dirname)

            KEY_LOCK = open(lockfile, 'w')
            fcntl.lockf(KEY_LOCK, fcntl.LOCK_EX)
            try:
                # just in case any were added recently
                self.ssh.load_system_host_keys()
                self.ssh._host_keys.update(self.ssh._system_host_keys)
                self._save_ssh_host_keys(self.keyfile)
            except:
                # unable to save keys, including scenario when key was invalid
                # and caught earlier
                traceback.print_exc()
                pass
            fcntl.lockf(KEY_LOCK, fcntl.LOCK_UN)

        self.ssh.close()
Example #7
    def __init__(self):
        self.recent_files = []
        self.tlist = []
        self.item = None

        fname = os.path.expanduser(GRAMPS_FILENAME)
        if not os.path.exists(fname):
            return # it's the first time gramps has ever been run

        try:
            with open(fname, "rb") as xml_file:
                if USE_LOCK:
                    fcntl.lockf(xml_file, fcntl.LOCK_SH)

                parser = ParserCreate()
                parser.StartElementHandler = self.start_element
                parser.EndElementHandler = self.end_element
                parser.CharacterDataHandler = self.characters
                parser.ParseFile(xml_file)
            # all advisory locks on a file are released on close
        except IOError as err:
            logging.warning(
                _("Unable to open list of recent DBs file {fname}: {error}"
                 ).format(fname=fname, error=err))
        except ExpatError as err:
            logging.error(
            _("Error parsing list of recent DBs from file {fname}: "
                  "{error}.\nThis might indicate damage to your files.\n"
                  "If you're sure there is no problem with other files, "
                  "delete it, and restart Gramps."
                 ).format(fname=fname, error=err))
Example #8
def test_mxnet_local_data_local_script():
    local_mode_lock_fd = open(LOCK_PATH, 'w')
    local_mode_lock = local_mode_lock_fd.fileno()

    script_path = os.path.join(DATA_DIR, 'mxnet_mnist', 'mnist.py')
    data_path = os.path.join(DATA_DIR, 'mxnet_mnist')

    mx = MXNet(entry_point=script_path, role='SageMakerRole',
               train_instance_count=1, train_instance_type='local',
               sagemaker_session=LocalNoS3Session())

    train_input = 'file://' + os.path.join(data_path, 'train')
    test_input = 'file://' + os.path.join(data_path, 'test')

    mx.fit({'train': train_input, 'test': test_input})
    endpoint_name = mx.latest_training_job.name
    try:
        # Since Local Mode uses the same port for serving, we need a lock in order
        # to allow concurrent test execution. The serving test is really fast so it still
        # makes sense to allow this behavior.
        fcntl.lockf(local_mode_lock, fcntl.LOCK_EX)
        predictor = mx.deploy(1, 'local', endpoint_name=endpoint_name)
        data = numpy.zeros(shape=(1, 1, 28, 28))
        predictor.predict(data)
    finally:
        mx.delete_endpoint()
        time.sleep(5)
        fcntl.lockf(local_mode_lock, fcntl.LOCK_UN)
Example #9
    def command(self):
        "run command"
        self.init()
        if self.options.index_name is None:
            print "\nProvide an index to update\n"
            print self.parser.print_help()
            sys.exit(2)

        if self.options.index_name not in ['messages', 'archive']:
            print "\nProvide a valid index to update\n"
            print self.parser.print_help()
            sys.exit(2)
        if not os.path.exists('/usr/bin/indexer'):
            print "\nSphinx indexer is not installed\n"
            sys.exit(2)

        try:
            lockfile = os.path.join(self.conf['baruwa.locks.dir'],
                                    'updatedelta.lock')
            with open(lockfile, 'w+') as lock:
                fcntl.lockf(lock, fcntl.LOCK_EX | fcntl.LOCK_NB)
                update_index(self.conf['sphinx.url'],
                        self.options.index_name,
                        self.options.index_has_rt)
        except IOError:
            print >> sys.stderr, "Another instance is running."
            sys.exit(2)
        finally:
            Session.close()
Example #10
def openlock(filename, operation, wait=True):
    """
    Returns a file-like object that gets a fcntl() lock.

    `operation` should be one of LOCK_SH or LOCK_EX for shared or
    exclusive locks.

    If `wait` is False, then openlock() will not block on trying to
    acquire the lock.
    """
    f = os.fdopen(os.open(filename, os.O_RDWR | os.O_CREAT, 0o666), "r+")
    if not wait:
        operation |= LOCK_NB
    try:
        lockf(f.fileno(), operation)
    except IOError as err:
        if not wait and err.errno in (EACCES, EAGAIN):
            from django.core.management.base import CommandError
            raise CommandError("Could not acquire lock on '%s' held by %s." %
                               (filename, f.readline().strip()))
        raise
    print("%s:%d" % (socket.gethostname(), os.getpid()), file=f)
    f.truncate()
    f.flush()
    return f
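openlock() leans on its module's imports (os, socket, lockf and the LOCK_* constants from fcntl, EACCES/EAGAIN from errno). A hedged usage sketch with those imports spelled out and a placeholder filename:

import os
import socket
from errno import EACCES, EAGAIN
from fcntl import lockf, LOCK_EX, LOCK_SH, LOCK_NB

# take an exclusive lock, failing fast instead of blocking (placeholder path)
f = openlock("/tmp/demo.lock", LOCK_EX, wait=False)
try:
    pass  # ... do the work the lock protects ...
finally:
    f.close()  # closing the file also releases the fcntl lock

Note that openlock() records hostname:pid in the file, which is exactly what the CommandError above reads back to name the current lock holder.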
Example #11
def test_local_mode_serving_from_s3_model(sagemaker_local_session, mxnet_model):
    local_mode_lock_fd = open(LOCK_PATH, 'w')
    local_mode_lock = local_mode_lock_fd.fileno()

    model_data = mxnet_model.model_data
    boto_session = sagemaker_local_session.boto_session
    default_bucket = sagemaker_local_session.default_bucket()
    uploaded_data = tar_and_upload_dir(boto_session, default_bucket,
                                       'test_mxnet_local_mode', '', model_data)

    s3_model = MXNetModel(model_data=uploaded_data.s3_prefix, role='SageMakerRole',
                          entry_point=mxnet_model.entry_point, image=mxnet_model.image,
                          sagemaker_session=sagemaker_local_session)

    predictor = None
    try:
        # Since Local Mode uses the same port for serving, we need a lock in order
        # to allow concurrent test execution. The serving test is really fast so it still
        # makes sense to allow this behavior.
        fcntl.lockf(local_mode_lock, fcntl.LOCK_EX)
        predictor = s3_model.deploy(initial_instance_count=1, instance_type='local')
        data = numpy.zeros(shape=(1, 1, 28, 28))
        predictor.predict(data)
    finally:
        if predictor:
            predictor.delete_endpoint()
            time.sleep(5)
        fcntl.lockf(local_mode_lock, fcntl.LOCK_UN)
Example #12
def endpoint_logs(request):
    """Saves an endpoint server Apache logs."""
    response, group, ip = endpoint_common(request)
    if response is not None:
        return response

    # Take a lock on the file
    while True:
        logfile = file(os.path.join(CONFIG['config']['logdir'], "access-%s-%s.log" % (group, ip)), 'a')
        try:
            fcntl.lockf(logfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError:
            time.sleep(1)
        else:
            break

    # Write out the log lines
    logfile.write(request.POST['data'])
    logfile.flush()

    # Unlock the file
    fcntl.lockf(logfile, fcntl.LOCK_UN)

    # Close the file
    logfile.close()

    # Write out that everything went okay
    response = http.HttpResponse(content_type='text/plain')
    response.write('OK\n')
    return response
Example #13
    def open(self, serial):
        '''
        Opens a new warc file with filename prefix `self.prefix` and serial
        number `self.serial` and assigns file handle to `self.f`.
        '''
        if not os.path.exists(self.directory):
            self.logger.info(
                    "warc destination directory %s doesn't exist, creating it",
                    self.directory)
            os.mkdir(self.directory)

        self.finalname = self.filename(serial)
        self.logger.trace('opening %s', self.finalname)
        self.path = os.path.sep.join(
                [self.directory, self.finalname + self.open_suffix])

        self.f = open(self.path, 'wb')
        # if no '.open' suffix is used for WARC, acquire an exclusive
        # file lock.
        if self.open_suffix == '':
            try:
                fcntl.lockf(self.f, fcntl.LOCK_EX | fcntl.LOCK_NB)
            except IOError as exc:
                self.logger.error(
                        'could not lock file %s (%s)', self.path, exc)
        return self.f
Example #14
 def unlock(self):
     if not self.owned:
         raise Exception("can't unlock %r - we don't own it" % self.name)
     self._open_lock()
     fcntl.lockf(self.lockfile, fcntl.LOCK_UN, 0, 0)
     if vars.DEBUG_LOCKS: debug("%s unlock\n", self.name)
     self.owned = False
Example #15
	def completedLoad(self):
		#
		# we're done with kickstart file generation.
		#
		# update the lock file to reflect the fact that one more
		# unit of kickstart file generation can now occur
		#

		fp = self.openLockFile()

		#
		# if lockfile doesn't exist or is unreadable return
		#
		if fp == None:
			return 0
		
		fcntl.lockf(fp, fcntl.LOCK_EX)

		input = fp.readline()
		siblings = int(input)
		siblings = siblings + 1

		fp.seek(0)
		fp.write("%d\n" % siblings)
		fp.flush()

		fcntl.lockf(fp, fcntl.LOCK_UN)
		fp.close()

		return 1
Example #16
    def write(self, filename=None):
        """Write the list of recently used files to disk.
        
        If the instance is already associated with a file, filename can be
        omitted to save it there again.
        """
        if not filename and not self.filename:
            raise ParsingError('File not found', filename)
        elif not filename:
            filename = self.filename

        f = open(filename, "w")
        fcntl.lockf(f, fcntl.LOCK_EX)
        f.write('<?xml version="1.0"?>\n')
        f.write("<RecentFiles>\n")

        for r in self.RecentFiles:
            f.write("  <RecentItem>\n")
            f.write("    <URI>%s</URI>\n" % xml.sax.saxutils.escape(r.URI))
            f.write("    <Mime-Type>%s</Mime-Type>\n" % r.MimeType)
            f.write("    <Timestamp>%s</Timestamp>\n" % r.Timestamp)
            if r.Private == True:
                f.write("    <Private/>\n")
            if len(r.Groups) > 0:
                f.write("    <Groups>\n")
                for group in r.Groups:
                    f.write("      <Group>%s</Group>\n" % group)
                f.write("    </Groups>\n")
            f.write("  </RecentItem>\n")

        f.write("</RecentFiles>\n")
        fcntl.lockf(f, fcntl.LOCK_UN)
        f.close()
Example #17
    def __init__(self, pid_path):
        self._pid_path = pid_path
        self._other_running = False
        ensuredirs(self._pid_path)
        self._lockfile = None

        try:
            self._lockfile = os.open(self._pid_path, os.O_CREAT | os.O_WRONLY)
        except:
            raise SoleError('Cannot open lockfile (path = %s)' % self._pid_path)

        try:
            fcntl.lockf(self._lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
            
        except IOError:
            self._other_running = True
            try:
                f = open(self._pid_path, 'r')
                pid = f.read().strip()
                f.close()
            except:
                pid = '?'

            raise SoleError('Other instance is running (pid = %s)' % pid)

        try:
            os.ftruncate(self._lockfile, 0)
            os.write(self._lockfile, '%i\n' % os.getpid())
            os.fsync(self._lockfile)

        except:
            pass # the pid is only stored for user information, so this is allowed to fail
Example #18
def perform_request_save_file(filename, filecontent):
    """ Handles the save request for the given file name and file content

        @param filename: relative path of the file to save
        @type filename: str
        @param filecontent: content to be written in the file
        @type filecontent: str

        @return: dict with key "status", possible codes are:
            error_forbidden_path
            error_file_not_writable
            save_success
        @rtype: dict
    """
    response = {}
    file_path = INFO_PREFIX + filename
    if not file_in_info_space(file_path):
        response["status"] = "error_forbidden_path"
        return response
    try:
        file_desc = open(file_path, 'w')
        # Lock the file while writing to avoid clashes among users
        fcntl.lockf(file_desc.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
        try:
            file_desc.write(filecontent)
        finally:
            fcntl.lockf(file_desc.fileno(), fcntl.LOCK_UN)
        response["status"] = "save_success"
    except IOError:
        response["status"] = "error_file_not_writable"
    return response
Example #19
def lock(fileobj):
    """Lock a file object 'safely'.

    That means a failure to lock because the platform doesn't
    support fcntl or filesystem locks is not considered a
    failure. This call does block.

    Returns whether or not the lock was successful, or
    raises an exception in more extreme circumstances (full
    lock table, invalid file).
    """

    try:
        import fcntl
    except ImportError:
        return False
    else:
        try:
            fcntl.lockf(fileobj, fcntl.LOCK_EX)
        except IOError:
            # FIXME: There's possibly a lot of complicated
            # logic that needs to go here in case the IOError
            # is EACCES or EAGAIN.
            return False
        else:
            return True
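The return value is advisory: a caller proceeds either way but knows whether it is actually protected. A minimal usage sketch (the filename is a placeholder):

with open("library.data", "r+b") as fileobj:
    have_lock = lock(fileobj)
    # ... read or modify the file; if have_lock is False, the platform
    # offered no fcntl locking and we proceed unprotected ...
# any advisory lock is dropped when the file is closed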
Example #20
 def new_func(*args, **kwargs):
     """Wrapper function."""
     with open(LOCK_FILE, 'w') as fhdl:
         fcntl.lockf(fhdl, fcntl.LOCK_EX)
         result = func(*args, **kwargs)
         fcntl.lockf(fhdl, fcntl.LOCK_UN)
         return result
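new_func above is the inner function of a decorator; func and LOCK_FILE come from the enclosing scope, which the snippet does not show. A self-contained sketch of the full decorator, with a hypothetical lock path and a try/finally added so the lock is also released when func raises:

import fcntl
import functools

LOCK_FILE = '/tmp/serialized.lock'  # hypothetical path

def serialized(func):
    """Serialize calls to func across processes via an exclusive file lock."""
    @functools.wraps(func)
    def new_func(*args, **kwargs):
        with open(LOCK_FILE, 'w') as fhdl:
            fcntl.lockf(fhdl, fcntl.LOCK_EX)
            try:
                return func(*args, **kwargs)
            finally:
                fcntl.lockf(fhdl, fcntl.LOCK_UN)
    return new_func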
Example #21
 def get_lock(self):
     try:
         self._lock_fd = os.open(self._lock_path,
             os.O_CREAT | os.O_WRONLY, 0644)
         fcntl.lockf(self._lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
     except: return False
     return True
Example #22
    def process(self):
        (opts, args) = getopts()
        chkopts(opts)
        self.up_progress(10)

        passwd = None
        if opts.passwd_file is not None and os.path.exists(opts.passwd_file):
            try:
                fp = open(opts.passwd_file, "r")
                try:
                    self.up_progress(10)
                    fcntl.lockf(fp.fileno(), fcntl.LOCK_SH)
                    try:
                        passwd = fp.readline().strip("\n")
                    finally:
                        fcntl.lockf(fp.fileno(), fcntl.LOCK_UN)
                    self.up_progress(10)
                finally:
                    fp.close()

            except Exception, e:
                self.logger.error('Failed to read.- dom=%s passwd_file=%s' \
                      % (opts.name,opts.passwd_file))
                print >>sys.stderr,_('Failed to read.- dom=%s passwd_file=%s') \
                      % (opts.name,opts.passwd_file)
                raise e

            os.remove(opts.passwd_file)
Example #23
    def run_job(self, job_hash):
        """
        Executes the corresponding function defined in CRONJOBS
        """
        job = self.__get_job_by_hash(job_hash)
        job_name = job[1]
        job_args = job[2] if len(job) > 2 and not isinstance(job[2], string_type) else []
        job_kwargs = job[3] if len(job) > 3 else {}

        lock_file_name = None
        if self.settings.LOCK_JOBS:
            lock_file = open(os.path.join(tempfile.gettempdir(), 'django_crontab_%s.lock' % job_hash), 'w')
            lock_file_name = lock_file.name
            try:
                fcntl.lockf(lock_file, fcntl.LOCK_EX | fcntl.LOCK_NB)
            except:
                logger.warning('Tried to start cron job %s that is already running.', job)
                return

        module_path, function_name = job_name.rsplit('.', 1)
        module = import_module(module_path)
        func = getattr(module, function_name)
        try:
            func(*job_args, **job_kwargs)
        except:
            logger.exception('Failed to complete cronjob at %s', job)

        if self.settings.LOCK_JOBS:
            try:
                os.remove(lock_file_name)
            except:
                logger.exception('Error deleting lockfile %s of job %s', lock_file_name, job)
Example #24
    def get_lock_session(self, id):
        """get_lock_session(id)
        Tries to determine the session that holds the lock on id for information purposes, and returns an informative string.
        Returns None on failure
        """
        self.safe_LockCheck()

        self.global_lock_acquire()
        try:
            sessions = [s for s in os.listdir(self.sdir) if s.endswith(self.name + ".locks")]
            for session in sessions:
                try:
                    sf = os.path.join(self.sdir, session)
                    fd = -1
                    if not self.afs:
                        fd = os.open(sf, os.O_RDONLY)
                        fcntl.lockf(fd, fcntl.LOCK_SH)  # ONLY NFS
                    sf_file = open(sf)
                    names = pickle.load(sf_file)
                    sf_file.close()
                    if not self.afs and fd > 0:
                        fcntl.lockf(fd, fcntl.LOCK_UN)  # ONLY NFS
                        os.close(fd)
                    if id in names:
                        return self.session_to_info(session)
                except Exception as err:
                    logger.debug("Get Lock Session Exception: %s" % str(err))
                    continue
        finally:
            self.global_lock_release()
Example #25
 def lock(file):
     # Lock with a simple call to lockf() - this blocks until the lock is acquired
     try:
         fcntl.lockf(file, fcntl.LOCK_EX)
     except IOError, exc_value:
         print "Problem when trying to lock {0}, IOError {1}".format(file, exc_value[0])
         raise
Example #26
 def session_write(self):
     """ Writes the locked set to the session file. 
         The global lock MUST be held for this function to work, although on NFS additional
         locking is done
         Raises RepositoryError if session file is inaccessible """
     # logger.debug("Opening Session File: %s " % self.fn )
     try:
         # If this fails, we want to shutdown the repository (corruption
         # possible)
         fd = self.delayopen(self.fn)
         if not self.afs:
             fcntl.lockf(fd, fcntl.LOCK_EX)
         self.delaywrite(fd, pickle.dumps(self.locked))
         if not self.afs:
             fcntl.lockf(fd, fcntl.LOCK_UN)
         os.fsync(fd)
         os.close(fd)
     except OSError as x:
         if x.errno != errno.ENOENT:
             raise RepositoryError(self.repo, "Error on session file access '%s': %s" % (self.fn, x))
         else:
             # logger.debug( "File NOT found %s" %self.fn )
             raise RepositoryError(
                 self.repo,
                 "SessionWrite: Own session file not found! Possibly deleted by another ganga session.\n\
                                 Possible reasons could be that this computer has a very high load, or that the system clocks on computers running Ganga are not synchronized.\n\
                                 On computers with very high load and on network filesystems, try to avoid running concurrent ganga sessions for long.\n '%s' : %s"
                 % (self.fn, x),
             )
     except IOError as x:
         raise RepositoryError(self.repo, "Error on session file locking '%s': %s" % (self.fn, x))
Example #27
 def cnt_read(self):
     """ Tries to read the counter file.
         Raises ValueError (invalid contents)
         Raises OSError (no access/does not exist)
         Raises RepositoryError (fatal)
         """
     try:
         fd = os.open(self.cntfn, os.O_RDONLY)
         try:
             if not self.afs:  # additional locking for NFS
                 fcntl.lockf(fd, fcntl.LOCK_SH)
             # 100 bytes should be enough for any ID. Can raise ValueError
             return int(os.read(fd, 100).split("\n")[0])
         finally:
             if not self.afs:  # additional locking for NFS
                 fcntl.lockf(fd, fcntl.LOCK_UN)
             os.close(fd)
     except OSError as x:
         if x.errno != errno.ENOENT:
             raise RepositoryError(self.repo, "OSError on count file '%s' read: %s" % (self.cntfn, x))
         else:
             # This can be a recoverable error, depending on where it occurs
             raise
     except IOError as x:
         raise RepositoryError(self.repo, "Locking error on count file '%s' write: %s" % (self.cntfn, x))
Example #28
 def _yumCachePreYumHook(self):
     try:
         fcntl.lockf(self.yumCacheLock.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
     except IOError:
         self.state.start("Waiting for yumcache lock")
         fcntl.lockf(self.yumCacheLock.fileno(), fcntl.LOCK_EX)
         self.state.finish("Waiting for yumcache lock")
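The hook first attempts a non-blocking acquire so it can report that it is waiting before falling back to a blocking call. The same two-phase pattern in isolation (the notification hook is a placeholder):

import fcntl

def lock_with_notice(fhandle, notify=print):
    """Take an exclusive lock on fhandle, announcing if we have to wait for it."""
    try:
        fcntl.lockf(fhandle.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError:
        notify("waiting for lock...")
        fcntl.lockf(fhandle.fileno(), fcntl.LOCK_EX)  # block until the holder releases
        notify("lock acquired")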
Example #29
 def session_read(self, fn):
     """ Reads a session file and returns a set of IDs locked by that session.
         The global lock MUST be held for this function to work, although on NFS additional
         locking is done
         Raises RepositoryError if severe access problems occur (corruption otherwise!) """
     try:
         # This can fail (thats OK, file deleted in the meantime)
         fd = self.delay_session_open(fn)
         os.lseek(fd, 0, 0)
         try:
             if not self.afs:  # additional locking for NFS
                 fcntl.lockf(fd, fcntl.LOCK_SH)
             try:
                 # read up to 1 MB (that is more than enough...)
                 return pickle.loads(os.read(fd, 1048576))
             except Exception as x:
                 logger.warning(
                     "corrupt or inaccessible session file '%s' - ignoring it (Exception %s %s)."
                     % (fn, x.__class__.__name__, str(x))
                 )
         finally:
             if not self.afs:  # additional locking for NFS
                 fcntl.lockf(fd, fcntl.LOCK_UN)
             os.close(fd)
     except OSError as x:
         if x.errno != errno.ENOENT:
             raise RepositoryError(self.repo, "Error on session file access '%s': %s" % (fn, x))
     return set()
Example #30
def clean_ssl_keyring(keyring, session):
    old = open(keyring, 'r+')
    fcntl.lockf(old, fcntl.LOCK_EX)

    new = open(keyring + '.tmp', 'w')
    fcntl.lockf(new, fcntl.LOCK_EX)

    for line in old:
        if "-BEGIN CERTIFICATE-" in line:
            der = b""
            pem = line
        elif "-END CERTIFICATE-" in line:
            pem += line
            fingerprint = sha1(der).hexdigest().upper()
            builder = session.query(Builder).filter_by(ssl=fingerprint).first()
            user = session.query(Person).filter_by(ssl=fingerprint).first()
            if builder or user:
                new.write(pem)
        else:
            der += b64decode(line.strip())
            pem += line

    new.close()
    os.rename(keyring + '.tmp', keyring)
    old.close()
Example #31
    def exec_command(self, cmd, tmp_path, sudo_user,sudoable=False, executable='/bin/sh'):
        ''' run a command on the remote host '''

        ssh_cmd = self._password_cmd()
        ssh_cmd += ["ssh", "-tt", "-q"] + self.common_args
        if self.ipv6:
            ssh_cmd += ['-6']
        ssh_cmd += [self.host]

        if not self.runner.sudo or not sudoable:
            if executable:
                ssh_cmd.append(executable + ' -c ' + pipes.quote(cmd))
            else:
                ssh_cmd.append(cmd)
        else:
            sudocmd, prompt = utils.make_sudo_cmd(sudo_user, executable, cmd)
            ssh_cmd.append(sudocmd)

        vvv("EXEC %s" % ssh_cmd, host=self.host)

        not_in_host_file = self.not_in_host_file(self.host)

        if C.HOST_KEY_CHECKING and not_in_host_file:
            # lock around the initial SSH connectivity so the user prompt about whether to add 
            # the host to known hosts is not intermingled with multiprocess output.
            fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX)
            fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_EX)
        


        try:
            # Make sure stdin is a proper (pseudo) pty to avoid: tcgetattr errors
            import pty
            master, slave = pty.openpty()
            p = subprocess.Popen(ssh_cmd, stdin=slave,
                                 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            stdin = os.fdopen(master, 'w', 0)
        except:
            p = subprocess.Popen(ssh_cmd, stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            stdin = p.stdin

        self._send_password()

        if self.runner.sudo and sudoable and self.runner.sudo_pass:
            fcntl.fcntl(p.stdout, fcntl.F_SETFL,
                        fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
            sudo_output = ''
            while not sudo_output.endswith(prompt):
                rfd, wfd, efd = select.select([p.stdout], [],
                                              [p.stdout], self.runner.timeout)
                if p.stdout in rfd:
                    chunk = p.stdout.read()
                    if not chunk:
                        raise errors.AnsibleError('ssh connection closed waiting for sudo password prompt')
                    sudo_output += chunk
                else:
                    stdout = p.communicate()
                    raise errors.AnsibleError('ssh connection error waiting for sudo password prompt')
            stdin.write(self.runner.sudo_pass + '\n')
            fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)

        # We can't use p.communicate here because the ControlMaster may have stdout open as well
        stdout = ''
        stderr = ''
        while True:
            rfd, wfd, efd = select.select([p.stdout, p.stderr], [], [p.stdout, p.stderr], 1)

            # fail early if the sudo password is wrong
            if self.runner.sudo and sudoable and self.runner.sudo_pass:
                incorrect_password = gettext.dgettext(
                    "sudo", "Sorry, try again.")
                if stdout.endswith("%s\r\n%s" % (incorrect_password, prompt)):
                    raise errors.AnsibleError('Incorrect sudo password') 

            if p.stdout in rfd:
                dat = os.read(p.stdout.fileno(), 9000)
                stdout += dat
                if dat == '':
                    p.wait()
                    break
            elif p.stderr in rfd:
                dat = os.read(p.stderr.fileno(), 9000)
                stderr += dat
                if dat == '':
                    p.wait()
                    break
            elif p.poll() is not None:
                break
        stdin.close() # close stdin after we read from stdout (see also issue #848)
        
        if C.HOST_KEY_CHECKING and not_in_host_file:
            # lock around the initial SSH connectivity so the user prompt about whether to add 
            # the host to known hosts is not intermingled with multiprocess output.
            fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_UN)
            fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN)

        if p.returncode != 0 and stderr.find('Bad configuration option: ControlPersist') != -1:
            raise errors.AnsibleError('using -c ssh on certain older ssh versions may not support ControlPersist, set ANSIBLE_SSH_ARGS="" (or ansible_ssh_args in the config file) before running again')

        return (p.returncode, '', stdout, stderr)
Example #32
 def unlock(self, lock_file):
     fcntl.lockf(lock_file, fcntl.LOCK_UN)
Example #33
 def lock(self, lock_file):
     fcntl.lockf(lock_file, fcntl.LOCK_EX)
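Examples #32 and #33 expose lockf as paired lock/unlock methods. The same pairing can be packaged as a context manager so the release cannot be forgotten; a sketch, not part of the original source:

import fcntl
from contextlib import contextmanager

@contextmanager
def locked(lock_file):
    """Hold an exclusive lock on the open file object for the with-block."""
    fcntl.lockf(lock_file, fcntl.LOCK_EX)
    try:
        yield lock_file
    finally:
        fcntl.lockf(lock_file, fcntl.LOCK_UN)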
Example #34
            for raw_file in raw_files:
                os.unlink(raw_file)
       
        # slc files can be deleted if *.rdr exists
        rdr_files = glob("%s/*.rdr" % ci_dir)
        if len(rdr_files) > 0:
            for slc_file in slc_files:
                os.unlink(slc_file)


if __name__ == "__main__":

    # lock so that only one instance can run
    lock_file = "/tmp/janitor.lock"
    f = open(lock_file, 'w')
    f.write("%d\n" % os.getpid())
    f.flush()
    try: fcntl.lockf(f, fcntl.LOCK_EX|fcntl.LOCK_NB)
    except IOError, e:
        if e.errno == errno.EAGAIN:
            sys.stderr.write("Janitor is already running.\n")
            sys.exit(-1)
        raise

    # run cleanup
    root_work_dir = "/data/work/jobs"
    try: janitor(root_work_dir)
    finally:
        f.close()
        os.unlink(lock_file)
Example #35
 def __del__(self):
     if self.lock_f:
         fcntl.lockf(self.lock_f, fcntl.LOCK_UN)
         self.lock_f.close()
         os.unlink('/tmp/scheduler.lock')
Example #36
def main():
    p = optparse.OptionParser()
    p.add_option('-o',
                 '--stdout',
                 dest='stdout',
                 help="""Read stdout from FILE.""",
                 metavar='FILE')
    p.add_option('-e',
                 '--stderr',
                 dest='stderr',
                 help="""Read stderr from FILE.""",
                 metavar='FILE')
    p.add_option('-s',
                 '--status',
                 dest='status',
                 metavar='FILE',
                 help='Get process exit status from FILE. '
                 'Will block until a shared lock is acquired on FILE.')
    p.add_option('-d',
                 '--delete',
                 dest='delete',
                 action='store_true',
                 help='Delete stdout, stderr, and status files when finished.')
    options, args = p.parse_args()
    if args:
        sys.stderr.write('Unexpected arguments: {0}\n'.format(args))
        return 1

    missing = []
    for option in ('stdout', 'stderr', 'status'):
        if getattr(options, option) is None:
            missing.append(option)

    if missing:
        p.print_usage()
        msg = 'Missing required flag(s): {0}\n'.format(', '.join(
            '--' + i for i in missing))
        sys.stderr.write(msg)
        return 1

    with open(options.stdout, 'r') as stdout:
        with open(options.stderr, 'r') as stderr:
            with open(options.status, 'r') as status:
                fcntl.lockf(status, fcntl.LOCK_SH)
                return_code_str = status.read()

            if return_code_str:
                return_code = int(return_code_str)
            else:
                print >> sys.stderr, 'WARNING: wrapper script interrupted.'
                return_code = 1

            stderr_copier = threading.Thread(target=shutil.copyfileobj,
                                             args=[stderr, sys.stderr],
                                             name='stderr-copier')
            stderr_copier.daemon = True
            stderr_copier.start()
            try:
                shutil.copyfileobj(stdout, sys.stdout)
            finally:
                stderr_copier.join()

    if options.delete:
        for f in [options.stdout, options.stderr, options.status]:
            os.unlink(f)

    return return_code
Example #37
def run(*args, **kwargs):
    """The rss2email command line interface

    Arguments passed to this function are forwarded to the parser's
    `.parse_args()` call without modification.
    """
    parser = _argparse.ArgumentParser(prog='rss2email',
                                      description=_PACKAGE_DOCSTRING)

    parser.add_argument('-v',
                        '--version',
                        action='version',
                        version='%(prog)s {}'.format(__version__))
    parser.add_argument(
        '--full-version',
        action=FullVersionAction,
        nargs=0,
        help='print the version information of all related packages and exit')
    parser.add_argument('-c',
                        '--config',
                        metavar='PATH',
                        default=[],
                        action='append',
                        help='path to the configuration file')
    parser.add_argument('-d',
                        '--data',
                        metavar='PATH',
                        help='path to the feed data file')
    parser.add_argument('-V',
                        '--verbose',
                        default=0,
                        action='count',
                        help='increment verbosity')
    subparsers = parser.add_subparsers(title='commands')

    new_parser = subparsers.add_parser(
        'new', help=_command.new.__doc__.splitlines()[0])
    new_parser.set_defaults(func=_command.new)
    new_parser.add_argument(
        'email',
        nargs='?',
        help='default target email for the new feed database')

    email_parser = subparsers.add_parser(
        'email', help=_command.email.__doc__.splitlines()[0])
    email_parser.set_defaults(func=_command.email)
    email_parser.add_argument(
        'email',
        default='',
        help='default target email for the email feed database')

    add_parser = subparsers.add_parser(
        'add', help=_command.add.__doc__.splitlines()[0])
    add_parser.set_defaults(func=_command.add)
    add_parser.add_argument('name', help='name of the new feed')
    add_parser.add_argument('url', help='location of the new feed')
    add_parser.add_argument('email',
                            nargs='?',
                            help='target email for the new feed')
    add_parser.add_argument('--only-new',
                            action='store_true',
                            help="entries in the feed now will not be sent")

    run_parser = subparsers.add_parser(
        'run', help=_command.run.__doc__.splitlines()[0])
    run_parser.set_defaults(func=_command.run)
    run_parser.add_argument('-n',
                            '--no-send',
                            dest='send',
                            default=True,
                            action='store_const',
                            const=False,
                            help="fetch feeds, but don't send email")
    run_parser.add_argument('--clean',
                            action='store_true',
                            help='clean old feed entries')
    run_parser.add_argument(
        'index',
        nargs='*',
        help='feeds to fetch (defaults to fetching all feeds)')

    list_parser = subparsers.add_parser(
        'list', help=_command.list.__doc__.splitlines()[0])
    list_parser.set_defaults(func=_command.list)

    pause_parser = subparsers.add_parser(
        'pause', help=_command.pause.__doc__.splitlines()[0])
    pause_parser.set_defaults(func=_command.pause)
    pause_parser.add_argument(
        'index',
        nargs='*',
        help='feeds to pause (defaults to pausing all feeds)')

    unpause_parser = subparsers.add_parser(
        'unpause', help=_command.unpause.__doc__.splitlines()[0])
    unpause_parser.set_defaults(func=_command.unpause)
    unpause_parser.add_argument(
        'index',
        nargs='*',
        help='feeds to unpause (defaults to unpausing all feeds)')

    delete_parser = subparsers.add_parser(
        'delete', help=_command.delete.__doc__.splitlines()[0])
    delete_parser.set_defaults(func=_command.delete)
    delete_parser.add_argument('index', nargs='+', help='feeds to delete')

    reset_parser = subparsers.add_parser(
        'reset', help=_command.reset.__doc__.splitlines()[0])
    reset_parser.set_defaults(func=_command.reset)
    reset_parser.add_argument(
        'index',
        nargs='*',
        help='feeds to reset (defaults to resetting all feeds)')

    opmlimport_parser = subparsers.add_parser(
        'opmlimport', help=_command.opmlimport.__doc__.splitlines()[0])
    opmlimport_parser.set_defaults(func=_command.opmlimport)
    opmlimport_parser.add_argument(
        'file',
        metavar='PATH',
        nargs='?',
        help='path for imported OPML (defaults to stdin)')

    opmlexport_parser = subparsers.add_parser(
        'opmlexport', help=_command.opmlexport.__doc__.splitlines()[0])
    opmlexport_parser.set_defaults(func=_command.opmlexport)
    opmlexport_parser.add_argument(
        'file',
        metavar='PATH',
        nargs='?',
        help='path for exported OPML (defaults to stdout)')

    args = parser.parse_args(*args, **kwargs)

    if args.verbose:
        _LOG.setLevel(max(_logging.DEBUG, _logging.ERROR - 10 * args.verbose))

    # https://docs.python.org/3/library/logging.html#logrecord-attributes
    formatter = _logging.Formatter('%(asctime)s [%(levelname)s] %(message)s')
    for handler in _LOG.handlers:  # type: _logging.Handler
        handler.setFormatter(formatter)

    if not getattr(args, 'func', None):
        parser.error('too few arguments')

    # Immediately lock so only one r2e instance runs at a time
    if UNIX:
        import fcntl as _fcntl
        from pathlib import Path as _Path
        dir = _os.environ.get("XDG_RUNTIME_DIR")
        if dir is None:
            dir = _os.path.join("/tmp", "rss2email-{}".format(_os.getuid()))
            _Path(dir).mkdir(mode=0o700, parents=True, exist_ok=True)
        lockfile_path = _os.path.join(dir, "rss2email.lock")
        lockfile = open(lockfile_path, "w")
        _fcntl.lockf(lockfile, _fcntl.LOCK_EX)
        _LOG.debug("acquired lock file {}".format(lockfile_path))
    else:
        # TODO: What to do on Windows?
        lockfile = None

    try:
        if not args.config:
            args.config = None
        feeds = _feeds.Feeds(datafile_path=args.data, configfiles=args.config)
        if args.func != _command.new:
            feeds.load()
        if not args.verbose:
            _LOG.setLevel(feeds.config['DEFAULT']['verbose'].upper())
        args.func(feeds=feeds, args=args)
    except _error.RSS2EmailError as e:
        e.log()
        if _logging.ERROR - 10 * args.verbose < _logging.DEBUG:
            raise  # don't mask the traceback
        _sys.exit(1)
    finally:
        if feeds is not None:
            feeds.close()
        if lockfile is not None:
            lockfile.close()
Example #38
    def __init__(self, server=None, palette='default',
                 keymap='default', debug=False, verbose=False,
                 disable_sync=False, disable_background_sync=False,
                 fetch_missing_refs=False,
                 path=config.DEFAULT_CONFIG_PATH):
        self.server = server
        self.config = config.Config(server, palette, keymap, path)
        if debug:
            level = logging.DEBUG
        elif verbose:
            level = logging.INFO
        else:
            level = logging.WARNING
        logging.basicConfig(filename=self.config.log_file, filemode='w',
                            format='%(asctime)s %(message)s',
                            level=level)
        # Python2.6 Logger.setLevel doesn't convert string name
        # to integer code. Here, we set the requests logger level to
        # be less verbose, since our logging output duplicates some
        # requests logging content in places.
        req_level_name = 'WARN'
        req_logger = logging.getLogger('requests')
        if sys.version_info < (2, 7):
            level = logging.getLevelName(req_level_name)
            req_logger.setLevel(level)
        else:
            req_logger.setLevel(req_level_name)
        self.log = logging.getLogger('gertty.App')
        self.log.debug("Starting")

        self.lock_fd = open(self.config.lock_file, 'w')
        try:
            fcntl.lockf(self.lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError:
            print("error: another instance of gertty is running for: %s" % self.config.server['name'])
            sys.exit(1)

        self.project_cache = ProjectCache()
        self.ring = mywid.KillRing()
        self.input_buffer = []
        webbrowser.register('xdg-open', None, BackgroundBrowser("xdg-open"))

        self.fetch_missing_refs = fetch_missing_refs
        self.config.keymap.updateCommandMap()
        self.search = search.SearchCompiler(self.config.username)
        self.db = db.Database(self, self.config.dburi, self.search)
        self.sync = sync.Sync(self, disable_background_sync)

        self.status = StatusHeader(self)
        self.header = urwid.AttrMap(self.status, 'header')
        self.screens = urwid.MonitoredList()
        self.breadcrumbs = BreadCrumbBar()
        self.screens.set_modified_callback(
            functools.partial(self.breadcrumbs._update, self.screens))
        if self.config.breadcrumbs:
            self.footer = urwid.AttrMap(self.breadcrumbs, 'footer')
        else:
            self.footer = None
        screen = view_project_list.ProjectListView(self)
        self.status.update(title=screen.title)
        self.updateStatusQueries()
        self.frame = urwid.Frame(body=screen, footer=self.footer)
        self.loop = urwid.MainLoop(self.frame, palette=self.config.palette.getPalette(),
                                   handle_mouse=self.config.handle_mouse,
                                   unhandled_input=self.unhandledInput,
                                   input_filter=self.inputFilter)

        self.sync_pipe = self.loop.watch_pipe(self.refresh)
        self.error_queue = queue.Queue()
        self.error_pipe = self.loop.watch_pipe(self._errorPipeInput)
        self.logged_warnings = set()
        self.command_pipe = self.loop.watch_pipe(self._commandPipeInput)
        self.command_queue = queue.Queue()

        warnings.showwarning = self._showWarning

        has_subscribed_projects = False
        with self.db.getSession() as session:
            if session.getProjects(subscribed=True):
                has_subscribed_projects = True
        if not has_subscribed_projects:
            self.welcome()

        self.loop.screen.tty_signal_keys(start='undefined', stop='undefined')
        #self.loop.screen.set_terminal_properties(colors=88)

        self.startSocketListener()

        if not disable_sync:
            self.sync_thread = threading.Thread(target=self.sync.run, args=(self.sync_pipe,))
            self.sync_thread.daemon = True
            self.sync_thread.start()
        else:
            self.sync_thread = None
            self.sync.offline = True
            self.status.update(offline=True)
Example #39
 def Unlock(self):
   """Release our pid file."""
   fcntl.lockf(self._file, fcntl.LOCK_UN)
   self._locked = False
Example #40
 def _yumCachePostYumHook(self):
     fcntl.lockf(self.yumCacheLock.fileno(), fcntl.LOCK_UN)
Example #41
 def __exit__(self, *exc_info):
     fcntl.lockf(self.lockf, fcntl.LOCK_UN)
     self.lockf.close()
Example #42
class PidFile(object):
  """Interprocess locking via fcntl and a pid file.

  We use fcntl to manage locks between processes, as the kernel will
  release the lock when the process dies no matter what, so it works
  quite well.

  We store the pid in the file we use so that 3rd party programs,
  primarily small shell scripts, can easily see who has (or had) the
  lock via the stored pid.  We don't clean the pid up on exit
  because most programs will have to check if the program is still
  running anyways.

  We can forcibly take a lock by deleting the file and re-creating
  it.  When we do so, we check if the pid in the file is running and
  send it a SIGTERM *if and only if* it has a commandline with
  'nsscache' somewhere in the string.

  We try to kill the process to avoid it completing after us and
  overwriting any changes.  We check for 'nsscache' to avoid killing
  a re-used PID.  We are not paranoid, we send the SIGTERM and
  assume it dies.

  WARNING:  Use over NFS with *extreme* caution.  fcntl locking can
  be configured to work, but your mileage can and will vary.
  """

  STATE_DIR = '/var/run'
  PROC_DIR = '/proc'
  PROG_NAME = 'nsscache'

  def __init__(self, filename=None, pid=None):
    """Initialize the PidFile object."""
    self._locked = False
    self._file = None
    self.filename = filename
    self.pid = pid

    # Setup logging.
    self.log = logging.getLogger(self.__class__.__name__)

    if self.pid is None:
      self.pid = os.getpid()

    # If no filename is given, default to the basename we were
    # invoked with.
    if self.filename is None:
      basename = os.path.basename(sys.argv[0])
      if not basename:
        # We were invoked from a python interpreter with
        # bad arguments, or otherwise loaded without sys.argv
        # being set.
        self.log.critical('Can not determine lock file name!')
        raise TypeError('missing required argument: filename')
      self.filename = '%s/%s' % (self.STATE_DIR, basename)

    self.log.debug('using %s for lock file', self.filename)

  def __del__(self):
    """Release our pid file on object destruction."""
    if self.Locked():
      self.Unlock()

  def _Open(self, filename=None):
    """Create our file and store the file object."""
    if filename is None:
      filename = self.filename

    # We want to create this file if it doesn't exist, but 'w'
    # will truncate, so we use 'a+' and seek.  We don't truncate
    # the file because we haven't tested if it is locked by
    # another program yet, this is done later by fcntl module.
    self._file = open(filename, 'a+')
    self._file.seek(0)

    # Set permissions.
    os.chmod(filename,
             stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)

  def Lock(self, force=False):
    """Open our pid file and lock it.

    Args:
      force: optional flag to override the lock.
    Returns:
      True if successful
      False otherwise
    """
    if self._file is None:
      # Open the file and trap permission denied.
      try:
        self._Open()
      except IOError, e:
        if e.errno == errno.EACCES:
          self.log.warning('Permission denied opening lock file: %s',
                           self.filename)
          return False
        raise

    # Try to get the lock.
    return_val = False
    try:
      fcntl.lockf(self._file, fcntl.LOCK_EX | fcntl.LOCK_NB)
      return_val = True
    except IOError, e:
      if e.errno in [errno.EACCES, errno.EAGAIN]:
        # Catch the error raised when the file is locked.
        if not force:
          self.log.debug('%s already locked!', self.filename)
          return False
      else:
        # Otherwise re-raise it.
        raise
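The snippet ends mid-method; presumably Lock() finishes by returning return_val. Under that assumption, a hedged sketch of how a caller might drive the class, using the Unlock() from Example #39 and a hypothetical pid file path:

import sys

pidfile = PidFile(filename='/tmp/nsscache.pid')  # hypothetical path
if not pidfile.Lock():
    sys.exit('another instance holds the lock')
try:
    pass  # ... do the work the pid file protects ...
finally:
    pidfile.Unlock()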
Example #43
async def main():
    """
    Main function of aucote project
    Returns:

    """
    print("%s, version: %s.%s.%s" % ((APP_NAME,) + VERSION))

    # parse arguments
    parser = argparse.ArgumentParser(description='Tests compliance of devices.')
    parser.add_argument("--cfg", help="config file path")
    parser.add_argument('cmd', help="aucote command", type=str, default='service',
                        choices=['scan', 'service'],
                        nargs='?')
    parser.add_argument("--syncdb", action="store_true", help="Synchronize database")
    args = parser.parse_args()

    # read configuration
    await cfg_load(args.cfg)

    log.info("%s, version: %s.%s.%s", APP_NAME, *VERSION)

    try:
        lock = open(cfg['pid_file'], 'w')
        fcntl.lockf(lock, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError:
        log.error("There is another Aucote instance running already")
        sys.exit(1)

    exploit_filename = cfg['fixtures.exploits.filename']
    try:
        exploits = Exploits.read(file_name=exploit_filename)
    except NmapUnsupported:
        log.exception("Configuration seems to be invalid. Check ports and services or contact collective-sense")
        exit(1)

    def get_kuduworker():
        """
        Get kudu worker if enable, mock otherwise

        Returns:
            KuduQueue|MagicMock

        """
        if cfg['kuduworker.enable']:
            return KuduQueue(cfg['kuduworker.queue.address'])
        return MagicMock()  # used for local testing

    with get_kuduworker() as kudu_queue:
        aucote = Aucote(exploits=exploits, kudu_queue=kudu_queue, tools_config=EXECUTOR_CONFIG)

        if args.syncdb:
            log.info('Pushing %s exploits to the database', len(exploits))
            aucote.run_syncdb()

        if args.cmd == 'scan':
            await aucote.run_scan(as_service=False)
            IOLoop.current().stop()
        elif args.cmd == 'service':
            while True:
                await aucote.run_scan()
                cfg.reload(cfg['config_filename'])
Example #44
                            finalTfile.replace('Playback', 'Playback_full'))
                        Tfile = dqmfile

                    for i in range(RETRIES):
                        md5Digest = md5(file(Tfile).read())
                        originStr = "md5:%s %d %s" % (md5Digest.hexdigest(),
                                                      os.stat(Tfile).st_size,
                                                      Tfile)
                        originTMPFile = "%s.origin" % finalTMPfile
                        originFile = open(originTMPFile, "w")
                        originFile.write(originStr)
                        originFile.close()
                        shutil.copy(Tfile, finalTMPfile)
                        version = 1
                        lFile = open("%s/lock" % TMPDROPBOX, "a")
                        lockf(lFile, LOCK_EX)
                        for vdir, vsubdir, vfiles in os.walk(DROPBOX):
                            if 'DQM_V0001_%s_R%09d.root' % (subsystem,
                                                            run) not in vfiles:
                                continue
                            version += 1

                        if not os.path.exists("%s/V%04d" % (DROPBOX, version)):
                            os.makedirs("%s/V%04d" % (DROPBOX, version))

                        finalfile = "%s/V%04d/DQM_V0001_%s_R%09d.root" % (
                            DROPBOX, version, subsystem, run)
                        originFileName = "%s.origin" % finalfile
                        if os.path.exists(finalTMPfile) and os.stat(
                                finalTMPfile).st_size == os.stat(
                                    Tfile).st_size:
Example #45
def unlock_path(lock):
    if os.name != 'posix':
        return None
    fcntl.lockf(lock, fcntl.LOCK_UN)
    lock.close()
Example #46
                User.update_data(user_json.id, user_json)
        except tweepy.error.TweepError:
            # API might return errors occasionally
            print_exc()
            time.sleep(30)
    end_time = time.time()
    print('')
    print('[{}] updated={} time_elapsed={:.2f}'.format(
        time.strftime('%c'), update_count, (end_time - start_time)))


if __name__ == '__main__':
    # File lock, only run the script once at a time
    f = open('user_lock', 'w')
    try:
        fcntl.lockf(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError as e:
        if e.errno == errno.EAGAIN:
            sys.stderr.write(
                '[{}] Script refresh_user_data already running.\n'.format(
                    time.strftime('%c')))
            sys.exit(-1)
        raise
    # The create_new flag tells the script whether it should scan the tweet table for new users
    create_new = 'create' in sys.argv
    if 'update_followers' in sys.argv:
        print('[{}] Start refresh_user_followers'.format(time.strftime('%c')))
        refresh_user_followers()
    else:
        print('[{}] Start refresh_user_data'.format(time.strftime('%c')))
        refresh_user_data(create_new)
Example #47
0
    def get_status(self, checkpoint_time=0):
        """
        Monitor Status --->        Created    Started  Paused      Stopped
        ----------------------------------------------------------------------
        slave_node                 N/A        VALUE    VALUE       N/A
        status                     Created    VALUE    Paused      Stopped
        last_synced                N/A        VALUE    VALUE       VALUE
        crawl_status               N/A        VALUE    N/A         N/A
        entry                      N/A        VALUE    N/A         N/A
        data                       N/A        VALUE    N/A         N/A
        meta                       N/A        VALUE    N/A         N/A
        failures                   N/A        VALUE    VALUE       VALUE
        checkpoint_completed       N/A        VALUE    VALUE       VALUE
        checkpoint_time            N/A        VALUE    VALUE       VALUE
        checkpoint_completed_time  N/A        VALUE    VALUE       VALUE
        """
        data = self.default_values
        with open(self.filename) as f:
            try:
                data.update(json.load(f))
            except ValueError:
                pass
        monitor_status = self.get_monitor_status()

        # Verify whether the monitor process is running and adjust the status
        if monitor_status in ["Started", "Paused"]:
            try:
                with open(self.monitor_pid_file, "r+") as f:
                    fcntl.lockf(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
                    monitor_status = "Stopped"
            except (IOError, OSError) as e:
                if e.errno in (EACCES, EAGAIN):
                    # cannot grab the lock, so the monitor process is still running; move on
                    pass
                else:
                    raise

        if monitor_status in ["Created", "Paused", "Stopped"]:
            data["worker_status"] = monitor_status

        # Checkpoint adjustments
        if checkpoint_time == 0:
            data["checkpoint_completed"] = DEFAULT_STATUS
            data["checkpoint_time"] = DEFAULT_STATUS
            data["checkpoint_completion_time"] = DEFAULT_STATUS
        else:
            if checkpoint_time != data["checkpoint_time"]:
                if checkpoint_time <= data["last_synced"]:
                    data["checkpoint_completed"] = "Yes"
                    data["checkpoint_time"] = checkpoint_time
                    data["checkpoint_completion_time"] = data["last_synced"]
                else:
                    data["checkpoint_completed"] = "No"
                    data["checkpoint_time"] = checkpoint_time
                    data["checkpoint_completion_time"] = DEFAULT_STATUS

        if data["checkpoint_time"] not in [0, DEFAULT_STATUS]:
            chkpt_time = data["checkpoint_time"]
            data["checkpoint_time"] = human_time(chkpt_time)
            data["checkpoint_time_utc"] = human_time_utc(chkpt_time)

        if data["checkpoint_completion_time"] not in [0, DEFAULT_STATUS]:
            chkpt_completion_time = data["checkpoint_completion_time"]
            data["checkpoint_completion_time"] = human_time(
                chkpt_completion_time)
            data["checkpoint_completion_time_utc"] = human_time_utc(
                chkpt_completion_time)

        if data["last_synced"] == 0:
            data["last_synced"] = DEFAULT_STATUS
            data["last_synced_utc"] = DEFAULT_STATUS
        else:
            last_synced = data["last_synced"]
            data["last_synced"] = human_time(last_synced)
            data["last_synced_utc"] = human_time_utc(last_synced)

        if data["worker_status"] != "Active":
            data["last_synced"] = DEFAULT_STATUS
            data["last_synced_utc"] = DEFAULT_STATUS
            data["crawl_status"] = DEFAULT_STATUS
            data["entry"] = DEFAULT_STATUS
            data["data"] = DEFAULT_STATUS
            data["meta"] = DEFAULT_STATUS
            data["failures"] = DEFAULT_STATUS
            data["checkpoint_completed"] = DEFAULT_STATUS
            data["checkpoint_time"] = DEFAULT_STATUS
            data["checkpoint_completed_time"] = DEFAULT_STATUS
            data["checkpoint_time_utc"] = DEFAULT_STATUS
            data["checkpoint_completion_time_utc"] = DEFAULT_STATUS

        if data["worker_status"] not in ["Active", "Passive"]:
            data["slave_node"] = DEFAULT_STATUS

        if data.get("last_synced_utc", 0) == 0:
            data["last_synced_utc"] = DEFAULT_STATUS

        if data.get("checkpoint_completion_time_utc", 0) == 0:
            data["checkpoint_completion_time_utc"] = DEFAULT_STATUS

        if data.get("checkpoint_time_utc", 0) == 0:
            data["checkpoint_time_utc"] = DEFAULT_STATUS

        return data
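
The pid-file probe near the top of get_status is a reusable idiom: if an exclusive non-blocking lock on a daemon's pid file can be taken, the daemon has exited. A standalone sketch of just that check (the function name is ours):

import fcntl
from errno import EACCES, EAGAIN

def pid_file_holder_alive(pid_file):
    """Return True if some process still holds the lock on pid_file."""
    try:
        with open(pid_file, "r+") as f:
            fcntl.lockf(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
        return False  # we took the lock, so nobody was holding it
    except (IOError, OSError) as e:
        if e.errno in (EACCES, EAGAIN):
            return True  # lock held elsewhere: the process is still running
        raise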
Example #48
0
 try:
     st = os.lstat(fn)
 except OSError, e:
     if e.errno == errno.ENOENT:
         pass
     else:
         raise
 else:
     if not stat.S_ISREG(st.st_mode):
         raise BX(_("destination not a file: %s" % fn))
 fd = os.open(fn, os.O_RDWR | os.O_CREAT, 0666)
 # log_error("fd=%r" %fd)
 try:
     if offset == 0 or (offset == -1 and size == len(contents)):
         #truncate file
         fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
         try:
             os.ftruncate(fd, 0)
             # log_error("truncating fd %r to 0" %fd)
         finally:
             fcntl.lockf(fd, fcntl.LOCK_UN)
     if offset == -1:
         os.lseek(fd, 0, 2)
     else:
         os.lseek(fd, offset, 0)
     #write contents
     fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB, len(contents), 0, 2)
     try:
         os.write(fd, contents)
         # log_error("wrote contents")
     finally:
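
Unlike the whole-file locks elsewhere on this page, this example uses the range form fcntl.lockf(fd, cmd, len, start, whence) to lock only the bytes about to be written, so writers to different regions do not block each other. A self-contained sketch of the same idea (file name and length are placeholders):

import fcntl
import os

fd = os.open("data.bin", os.O_RDWR | os.O_CREAT, 0o644)
offset = os.lseek(fd, 0, os.SEEK_END)  # append position
# lock exactly the 4096 bytes we are about to write, starting at offset
fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB, 4096, offset, os.SEEK_SET)
try:
    os.write(fd, b"\0" * 4096)
finally:
    fcntl.lockf(fd, fcntl.LOCK_UN, 4096, offset, os.SEEK_SET)
    os.close(fd)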
Example #49
0
 def unlock(self):
     """<method internal="yes">
             </method>"""
     fcntl.lockf(self.lock_file, fcntl.LOCK_UN)
Example #50
0
    def lock(self):
        if self.fd:
            return

        self.fd = open(self.filename, 'w')
        fcntl.lockf(self.fd, fcntl.LOCK_EX)
Example #51
0
 def unlock(self):
     fcntl.lockf(self.lockfile, fcntl.LOCK_UN)
Example #52
0
    def close(self):
        ''' terminate the connection '''

        cache_key = self._cache_key()
        SSH_CONNECTION_CACHE.pop(cache_key, None)
        SFTP_CONNECTION_CACHE.pop(cache_key, None)

        if self.sftp is not None:
            self.sftp.close()

        if C.HOST_KEY_CHECKING and C.PARAMIKO_RECORD_HOST_KEYS and self._any_keys_added():

            # add any new SSH host keys -- warning -- this could be slow
            # (This doesn't acquire the connection lock because it needs
            # to exclude only other known_hosts writers, not connections
            # that are starting up.)
            lockfile = self.keyfile.replace("known_hosts", ".known_hosts.lock")
            dirname = os.path.dirname(self.keyfile)
            makedirs_safe(dirname)

            KEY_LOCK = open(lockfile, 'w')
            fcntl.lockf(KEY_LOCK, fcntl.LOCK_EX)

            try:
                # just in case any were added recently

                self.ssh.load_system_host_keys()
                self.ssh._host_keys.update(self.ssh._system_host_keys)

                # gather information about the current key file, so
                # we can ensure the new file has the correct mode/owner

                key_dir = os.path.dirname(self.keyfile)
                if os.path.exists(self.keyfile):
                    key_stat = os.stat(self.keyfile)
                    mode = key_stat.st_mode
                    uid = key_stat.st_uid
                    gid = key_stat.st_gid
                else:
                    mode = 33188  # 0o100644: regular file, rw-r--r--
                    uid = os.getuid()
                    gid = os.getgid()

                # Save the new keys to a temporary file and move it into place
                # rather than rewriting the file. We set delete=False because
                # the file will be moved into place rather than cleaned up.

                tmp_keyfile = tempfile.NamedTemporaryFile(dir=key_dir,
                                                          delete=False)
                os.chmod(tmp_keyfile.name, mode & 0o7777)
                os.chown(tmp_keyfile.name, uid, gid)

                self._save_ssh_host_keys(tmp_keyfile.name)
                tmp_keyfile.close()

                os.rename(tmp_keyfile.name, self.keyfile)

            except Exception:

                # unable to save keys, including scenario when key was invalid
                # and caught earlier
                traceback.print_exc()
            fcntl.lockf(KEY_LOCK, fcntl.LOCK_UN)

        self.ssh.close()
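
Two patterns are combined in close() above: a lock file serializes concurrent known_hosts writers, and a tempfile-plus-rename keeps readers from ever seeing a half-written file. Stripped to its skeleton (the names are ours; render is a caller-supplied function that writes the new contents):

import fcntl
import os
import tempfile

def locked_atomic_rewrite(path, render):
    """Serialize writers on path + '.lock', then swap in the new file."""
    lock = open(path + ".lock", "w")
    fcntl.lockf(lock, fcntl.LOCK_EX)
    try:
        tmp = tempfile.NamedTemporaryFile(
            dir=os.path.dirname(path) or ".", delete=False)
        tmp.close()
        render(tmp.name)           # write the full new contents
        os.rename(tmp.name, path)  # atomic within one filesystem
    finally:
        fcntl.lockf(lock, fcntl.LOCK_UN)
        lock.close()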
Example #53
0
                    os.unlink(self.lockfile)
                self.fd = os.open(self.lockfile,
                                  os.O_CREAT | os.O_EXCL | os.O_RDWR)
            except Exception, e:
                Log.note(
                    "\n" +
                    "**********************************************************************\n"
                    + "** Another instance is already running, quitting.\n" +
                    "**********************************************************************\n"
                )
                sys.exit(-1)
        else:  # non Windows
            import fcntl
            self.fp = open(self.lockfile, 'w')
            try:
                fcntl.lockf(self.fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
            except IOError:
                Log.note(
                    "\n" +
                    "**********************************************************************\n"
                    + "** Another instance is already running, quitting.\n" +
                    "**********************************************************************\n"
                )
                sys.exit(-1)
        self.initialized = True

    def __exit__(self, type, value, traceback):
        self.__del__()

    def __del__(self):
        import sys
Example #54
0
def release():
    # 1. Calling close() on the file releases the file lock;
    # 2. likewise, the lock is released when the process exits.
    # f.close()
    fcntl.lockf(f, fcntl.LOCK_UN)
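
The comments above rest on a POSIX subtlety worth demonstrating: lockf record locks belong to the process, so closing any descriptor for the file, not only an explicit LOCK_UN, drops them. A minimal fork-based check (POSIX only; the lock file name is ours):

import fcntl
import os

def child_can_lock(path):
    """Fork a child that tries a non-blocking exclusive lock on path."""
    pid = os.fork()
    if pid == 0:  # child: exit 0 if the lock is free
        try:
            lf = open(path, "w")
            fcntl.lockf(lf, fcntl.LOCK_EX | fcntl.LOCK_NB)
            os._exit(0)
        except (IOError, OSError):
            os._exit(1)
    _, status = os.waitpid(pid, 0)
    return os.WEXITSTATUS(status) == 0

f = open("demo.lock", "w")
fcntl.lockf(f, fcntl.LOCK_EX)
assert not child_can_lock("demo.lock")  # lock is held by us
f.close()                               # no LOCK_UN: close alone releases it
assert child_can_lock("demo.lock")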
Example #55
0
 def acquire(self):
     """Lock the opened file"""
     fcntl.lockf(self.file,
                 fcntl.LOCK_EX | (fcntl.LOCK_NB if self.nonblock else 0))
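
This acquire() and the release() in Example #58 below are two halves of the same wrapper. Pieced together into a self-contained class (our own assembly for illustration, not any single project's API):

import fcntl

class FileLock(object):
    """Exclusive advisory lock on a path."""

    def __init__(self, path, nonblock=False):
        self.file = open(path, "w")
        self.nonblock = nonblock

    def acquire(self):
        """Lock the opened file; raise instead of blocking if nonblock."""
        fcntl.lockf(self.file,
                    fcntl.LOCK_EX | (fcntl.LOCK_NB if self.nonblock else 0))

    def release(self):
        """Unlock the file and close the file object."""
        fcntl.lockf(self.file, fcntl.LOCK_UN)
        self.file.close()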
Example #56
0
 def trylock(self):
     fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
Example #57
0
 def __init__(self, path):
     self.lock_file = open(path, "w")
     if fcntl:
         fcntl.lockf(self.lock_file, fcntl.LOCK_EX)
Example #58
0
 def release(self):
     """Unlock the file and close the file object"""
     fcntl.lockf(self.file, fcntl.LOCK_UN)
     self.file.close()
Example #59
0
def lock_pid_file():
    """Lock the flashfocus PID file."""
    fcntl.lockf(PID, fcntl.LOCK_EX | fcntl.LOCK_NB)
Example #60
0
 def release(self):
     import fcntl
     fcntl.lockf(self.fd, fcntl.LOCK_UN)