Example #1
def lock_file(pidfile):
    """Actually the code below is needless..."""

    import fcntl
    import os
    try:
        fp = open(pidfile, "r+" if os.path.isfile(pidfile) else "w+")
    except IOError:
        raise MirrorError("Can't open or create %s", pidfile)

    try:
        fcntl.flock(fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError:
        try:
            pid = int(fp.read().strip())
        except (IOError, ValueError):
            raise MirrorError("Can't lock %s", pidfile)
        raise MirrorError("Can't lock %s, maybe another mirrord with pid %d is running",
                              pidfile, pid)

    fcntl.fcntl(fp, fcntl.F_SETFD, 1)  # set FD_CLOEXEC so exec'd children don't inherit the pidfile descriptor
    fp.seek(0)
    fp.write("%d\n" % os.getpid())
    fp.truncate()
    fp.flush()

    # We need to return fp to keep a reference on it
    return fp
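A hedged usage sketch for the example above; lock_file and MirrorError come from the snippet, while the pidfile path and run_daemon() are purely illustrative:

import sys

def main():
    try:
        fp = lock_file("/var/run/mirrord.pid")  # keep fp alive: dropping it closes the file and releases the flock
    except MirrorError as exc:
        print(exc)
        sys.exit(1)
    run_daemon()  # hypothetical main loop; the lock is held until the process exits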
Example #2
    def __enter__(self):
        # Create directory if it doesn't exist
        dir_name = os.path.dirname(FILE_NAME)
        try:
            os.makedirs(dir_name)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise

        # Open file with a lock and create it if it doesn't exist
        flag = os.O_RDWR if self._write is True else os.O_RDONLY
        mode = "rb+" if self._write is True else "rb"
        self._file = os.fdopen(os.open(FILE_NAME, os.O_CREAT | flag), mode)

        # Acquire a file lock
        op = fcntl.LOCK_EX if self._write is True else fcntl.LOCK_SH
        fcntl.flock(self._file.fileno(), op)

        try:
            self.data = pickle.load(self._file)
        except EOFError:
            self.data = {
                'jobs': [],
                'schedules': [],
                'workers': deque(),
                'next_job_id': 1,
                'next_schedule_id': 1,
                'next_worker_id': 1
            }

        if self._write is False:
            self._file.close()

        return self.data
Example #3
 def __unflock(self, file):
     try:
         flock(file, LOCK_UN)
     except IOError as e:
         print("failed to unlock file " + file.name + " due to: " + e.strerror)
         sys.exit(1)  # FIXME
     return True
Example #4
def _save(cube, key, path):
    # 'r+' apparently does not create the file if it doesn't
    # already exist, so...
    with open(path, "a"):
        pass

    with open(path, "r+") as fh:
        try:
            flock(fh, LOCK_EX | LOCK_NB)
        except IOError as e:
            warnings.warn("can't immediately write-lock the file (%s), blocking ..." % e)
            flock(fh, LOCK_EX)

        fh.seek(0, 0)

        try:
            cubedict = pickle.load(fh)
        except EOFError:
            cubedict = mkd()

        try:
            cubedict.set(key, cube)
        except Exception as e:
            import traceback as tb

            tb.print_exc()
            print("type:", type(e))
            print("str:", str(e))
            print("message: <<%s>>" % e)
            cubedict.delete(key)
            cubedict.set(key, cube)
Example #5
 def acquire(self, wait=None):
     """
     Acquire a lock on the mutex, optionally given a maximum wait timeout
     :param wait: Time to wait for lock
     """
     if self._has_lock:
         return True
     self._start = time.time()
     if wait is None:
         wait = self._wait
     if wait is None:
         fcntl.flock(self._handle, fcntl.LOCK_EX)
         passed = time.time() - self._start
     else:
         passed = time.time() - self._start
         while True:
             passed = time.time() - self._start
             if passed > wait:
                 logger.error('Lock for {0} could not be acquired. {1} sec > {2} sec'.format(self.key(), passed, wait))
                 raise NoLockAvailableException('Could not acquire lock %s' % self.key())
             try:
                 fcntl.flock(self._handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
                 break
             except IOError:
                 time.sleep(0.005)
     if passed > 1:  # More than 1 s is a long time to wait!
         logger.warning('Waited {0} sec for lock {1}'.format(passed, self.key()))
     self._start = time.time()
     self._has_lock = True
     return True
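A sketch of the calling pattern acquire() implies; the FileMutex constructor and release() counterpart are assumptions, not part of the snippet:

mutex = FileMutex("/tmp/demo.mutex")   # hypothetical class that opens self._handle
try:
    mutex.acquire(wait=5)              # raises NoLockAvailableException after ~5 s without the lock
    pass                               # critical section
finally:
    mutex.release()                    # assumed counterpart: flock(LOCK_UN) and clear _has_lock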
Example #6
def release_flock_lock(file_handle):
    """ Release a lock created by take_flock_lock

    Args:
    file_handle - The return from take_flock_lock()
    """
    fcntl.flock(file_handle.fileno(), fcntl.LOCK_UN)
Example #7
 def __exflock(self, file):
     try:
         flock(file, LOCK_EX)
     except IOError as e:
         print "failed to lock file" + file.name + " due to : " + e.strerror
         sys.exit(1)  # FIXME
     return True
Example #8
def store_plan_hints(filename, locking=True, reload_first=True):
    """Store data about the best FFT plans for this computer.

    FFT planning can take quite a while. After planning, the knowledge about
    the best plan for a given computer and given transform parameters can be
    written to disk so that the next time, planning can make use of that
    knowledge.

    Parameters:
        filename: file to write hints to.
        locking: if True, attempt to acquire an exclusive lock before writing
            which can otherwise cause problems if multiple processes are
            attempting to write to the same plan hints file.
        reload_first: if True, if the file exists, load the plan hints before
            storing them back. Safer in a multi-process setting where the hints
            may be written by a different process.

    """
    filename = pathlib.Path(filename)
    if not filename.exists():
        filename.touch() # can't open a file for read/write updating if it doesn't exist...
    with filename.open('r+b') as f:
        if locking:
            import fcntl
            fcntl.flock(f, fcntl.LOCK_EX)
        if reload_first:
            try:
                pyfftw.import_wisdom(pickle.load(f))
            except Exception:
                pass  # missing or corrupt hints are not fatal; they are rewritten below
            f.seek(0)
        pickle.dump(pyfftw.export_wisdom(), f)
        if locking:
            fcntl.flock(f, fcntl.LOCK_UN)
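A possible round-trip with the load_plan_hints companion in Example #9; the path is illustrative and pyfftw must be importable:

import os

hints = "/tmp/fft_plan_hints.pickle"
if os.path.exists(hints):
    load_plan_hints(hints)   # warm the planner with previously saved wisdom
# ... build pyfftw plans / run transforms here ...
store_plan_hints(hints)      # persist the (possibly updated) wisdom for next time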
Example #9
def load_plan_hints(filename, locking=True):
    """Load data about the best FFT plans for this computer.

    FFT planning can take quite a while. After planning, the knowledge about
    the best plan for a given computer and given transform parameters can be
    written to disk so that the next time, planning can make use of that
    knowledge.

    Parameters:
        filename: file to read hints from.
        locking: if True, attempt to acquire an exclusive lock before reading
            which can otherwise cause problems if multiple processes are
            attempting to write to the same plan hints file.

    Returns True if plan hints were successfully loaded.

    """
    with open(filename, 'rb') as f:
        if locking:
            import fcntl
            fcntl.flock(f, fcntl.LOCK_EX)
        loaded = pyfftw.import_wisdom(pickle.load(f))
        if locking:
            fcntl.flock(f, fcntl.LOCK_UN)
        return all(loaded)
Example #10
@contextmanager  # from contextlib; the cleanup half of this generator is not shown
def lock_file(filename, timeout=10, append=False, unlink=True):
    """
    Context manager that acquires a lock on a file.  This will block until
    the lock can be acquired, or the timeout time has expired (whichever occurs
    first).

    :param filename: file to be locked
    :param timeout: timeout (in seconds)
    :param append: True if file should be opened in append mode
    :param unlink: True if the file should be unlinked at the end
    """
    flags = os.O_CREAT | os.O_RDWR
    if append:
        flags |= os.O_APPEND
    fd = os.open(filename, flags)
    try:
        with LockTimeout(timeout, filename):
            while True:
                try:
                    fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
                    break
                except IOError as err:
                    if err.errno != errno.EAGAIN:
                        raise
                sleep(0.01)
        mode = 'r+'
        if append:
            mode = 'a+'
        file_obj = os.fdopen(fd, mode)
        yield file_obj
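With the decorator restored, a hedged usage sketch (path and payload are illustrative; the unlock/unlink half of the generator is not shown above):

with lock_file("/tmp/demo.data", timeout=5) as f:
    f.write("held exclusively while this block runs\n")  # lock and handle released on exit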
Example #11
def file_update_many(fh, points):
  if LOCK: fcntl.flock( fh.fileno(), fcntl.LOCK_EX )
  header = __readHeader(fh)
  now = int( time.time() )
  archives = iter( header['archives'] )
  currentArchive = next(archives)
  #debug('  update_many currentArchive=%s' % str(currentArchive))
  currentPoints = []
  for point in points:
    age = now - point[0]
    #debug('  update_many iterating points, point=%s age=%d' % (str(point),age))
    while currentArchive['retention'] < age: #we can't fit any more points in this archive
      #debug('  update_many this point is too old to fit here, currentPoints=%d' % len(currentPoints))
      if currentPoints: #commit all the points we've found that it can fit
        currentPoints.reverse() #put points in chronological order
        __archive_update_many(fh,header,currentArchive,currentPoints)
        currentPoints = []
      try:
        currentArchive = next(archives)
        #debug('  update_many using next archive %s' % str(currentArchive))
      except StopIteration:
        #debug('  update_many no more archives!')
        currentArchive = None
        break
    if not currentArchive: break #drop remaining points that don't fit in the database
    #debug('  update_many adding point=%s' % str(point))
    currentPoints.append(point)
  #debug('  update_many done iterating points')
  if currentArchive and currentPoints: #don't forget to commit after we've checked all the archives
    currentPoints.reverse()
    __archive_update_many(fh,header,currentArchive,currentPoints)
  __changeLastUpdate(fh)
  fh.close()
Example #12
    def missing_host_key(self, client, hostname, key):

        if C.HOST_KEY_CHECKING:

            fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX)
            fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_EX)

            old_stdin = sys.stdin
            sys.stdin = self.runner._new_stdin
            fingerprint = hexlify(key.get_fingerprint())
            ktype = key.get_name()
            
            # clear out any premature input on sys.stdin
            tcflush(sys.stdin, TCIFLUSH)

            inp = raw_input(AUTHENTICITY_MSG % (hostname, ktype, fingerprint))
            sys.stdin = old_stdin
            if inp not in ['yes','y','']:
                fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_UN)  # release with lockf to match how the locks were taken
                fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN)
                raise errors.AnsibleError("host connection rejected by user")

            fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_UN)
            fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN)


        key._added_by_ansible_this_time = True

        # existing implementation below:
        client._host_keys.add(hostname, key.get_name(), key)
Example #13
@contextmanager  # from contextlib; this generator is consumed as a context manager
def lock_path(directory, timeout=10):
    """
    Context manager that acquires a lock on a directory.  This will block until
    the lock can be acquired, or the timeout time has expired (whichever occurs
    first).

    For locking exclusively, the file or directory has to be opened for
    writing. Python doesn't allow directories to be opened for writing, so
    we work around this by locking a hidden file in the directory.

    :param directory: directory to be locked
    :param timeout: timeout (in seconds)
    """
    mkdirs(directory)
    lockpath = '%s/.lock' % directory
    fd = os.open(lockpath, os.O_WRONLY | os.O_CREAT)
    try:
        with LockTimeout(timeout, lockpath):
            while True:
                try:
                    fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
                    break
                except IOError as err:
                    if err.errno != errno.EAGAIN:
                        raise
                sleep(0.01)
        yield True
Example #14
 def acquire(self):
     try:
         fcntl.flock(self.fd, self.mode)
         self.locked = True
     except IOError as e:
         if e.errno != errno.ENOLCK:
             raise  # re-raise without losing the original traceback
Example #15
    def copy_dom0_clipboard(self, *_args, **_kwargs):
        clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
        text = clipboard.wait_for_text()

        if not text:
            self.notify("dom0 clipboard is empty!")
            return

        try:
            fd = os.open(APPVIEWER_LOCK, os.O_RDWR | os.O_CREAT, 0o0666)
        except Exception:  # pylint: disable=broad-except
            self.notify("Error while accessing Qubes clipboard!")
        else:
            try:
                fcntl.flock(fd, fcntl.LOCK_EX)
            except Exception:  # pylint: disable=broad-except
                self.notify("Error while locking Qubes clipboard!")
                os.close(fd)
            else:
                try:
                    with open(DATA, "w") as contents:
                        contents.write(text)
                    with open(FROM, "w") as source:
                        source.write("dom0")
                    with open(XEVENT, "w") as timestamp:
                        timestamp.write(str(Gtk.get_current_event_time()))
                except Exception as ex:  # pylint: disable=broad-except
                    self.notify("Error while writing to "
                                "Qubes clipboard!\n{0}".format(str(ex)))
                fcntl.flock(fd, fcntl.LOCK_UN)
                os.close(fd)
Example #16
def write(path, append=False, pid=None):
    try:
        if pid is None:
            pid = os.getpid()
        if append:
            pidfile = open(path, 'a+b')
        else:
            pidfile = open(path, 'wb')
        # get a blocking exclusive lock, we may have multiple
        # processes updating this pid file.
        fcntl.flock(pidfile.fileno(), fcntl.LOCK_EX)
        if append:
            pidfile.write(" %d" % pid)
        else:
            # clear out the file
            pidfile.seek(0)
            pidfile.truncate(0)
            # write the pid
            pidfile.write(str(pid).encode())  # binary mode needs bytes
        logging.info("Writing PID %s to '%s'", pid, path)
    finally:
        try:
            pidfile.close()
        except Exception:
            pass
Example #17
    def create_lockfile(self):
        # If the pidfile already exists, read the old PID first so that we can
        # restore it if locking fails, since opening with "w" truncates the
        # file contents.
        old_pid = ""
        if os.path.isfile(self.pidfile):
            with open(self.pidfile, "r") as old_pidfile:
                old_pid = old_pidfile.read()

        # Create a lockfile so that only one instance of this daemon is running at any time.
        try:
            lockfile = open(self.pidfile, "w")
        except IOError:
            print("Unable to create the pidfile.", file=sys.stderr)
            raise SystemExit(1)

        try:
            # Try to get an exclusive lock on the file.
            # This will fail if another process has the file locked.
            fcntl.flock(lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError:
            print("Unable to lock on the pidfile.", file=sys.stderr)

            # We need to overwrite the pidfile if we got here.
            with open(self.pidfile, "w") as pidfile:
                pidfile.write(old_pid)

            raise SystemExit(1)

        return lockfile
Example #18
        def lock_files(handles_dir):

            with lockutils.lock('external', 'test-', external=True):
                # Open some files we can use for locking
                handles = []
                for n in range(50):
                    path = os.path.join(handles_dir, ('file-%s' % n))
                    handles.append(open(path, 'w'))

                # Loop over all the handles and try locking the file
                # without blocking, keep a count of how many files we
                # were able to lock and then unlock. If the lock fails
                # we get an IOError and bail out with bad exit code
                count = 0
                for handle in handles:
                    try:
                        fcntl.flock(handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
                        count += 1
                        fcntl.flock(handle, fcntl.LOCK_UN)
                    except IOError:
                        os._exit(2)
                    finally:
                        handle.close()

                # Check that we managed to lock (and unlock) every file
                self.assertEqual(50, count)
Example #19
@contextmanager  # from contextlib; required so this generator works in a with-statement
def file_lock(lockfile):
    with open(lockfile) as f:
        fcntl.flock(f,fcntl.LOCK_EX)
        try:
            yield
        finally:
            fcntl.flock(f,fcntl.LOCK_UN)
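A short usage sketch; note that the lock file must already exist, because open(lockfile) defaults to read-only mode:

with open("/tmp/demo.lock", "a"):
    pass  # ensure the lock file exists
with file_lock("/tmp/demo.lock"):
    pass  # exclusive across processes; released by the finally clause above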
Example #20
 def acquire(self):
     # Lock the file (POSIX fcntl on Unix, pywin32 on Windows).
     if fcntl:
         fcntl.flock(self.handle, fcntl.LOCK_EX)
     else:
         # LOCK_EX and overlapped are assumed module-level names here
         # (win32con.LOCKFILE_EXCLUSIVE_LOCK and pywintypes.OVERLAPPED()).
         hfile = win32file._get_osfhandle(self.handle.fileno())
         win32file.LockFileEx(hfile, LOCK_EX, 0, -0x10000, overlapped)
Example #21
 def release(self):
     # Unlock the file.
     if fcntl:
         fcntl.flock(self.handle, fcntl.LOCK_UN)
     else:
         hfile = win32file._get_osfhandle(self.handle.fileno())
         win32file.UnlockFileEx(hfile, 0, -0x10000, overlapped)
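Examples #20 and #21 assume a module-level preamble that picks the locking backend; a sketch of what that setup plausibly looks like (the exact form is an assumption, though the pywin32 names are real):

try:
    import fcntl                     # POSIX: flock-based locking
    LOCK_EX = fcntl.LOCK_EX
except ImportError:
    fcntl = None                     # Windows: fall back to pywin32
    import win32con, win32file, pywintypes
    LOCK_EX = win32con.LOCKFILE_EXCLUSIVE_LOCK
    overlapped = pywintypes.OVERLAPPED()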
Example #22
File: util.py Project: ahale/brim
@contextmanager  # from contextlib; this generator is consumed as a context manager
def lock_path(path, timeout):
    """
    A context manager that attempts to gain an advisory lock for the
    path given within the timeout given. Raises LockPathTimeout if
    time expires before gaining the lock. If the lock is obtained,
    True is yielded and the lock is relinquished when the context ends.

    For example::

        with lock_path(path, timeout):
            # do things inside path knowing others using the same
            # advisory locking mechanism will be blocked until you're
            # done.

    :param path: The path to gain an advisory lock on.
    :param timeout: The number of seconds to wait to gain the lock
                    before raising LockPathTimeout.
    """
    fd = os_open(path, O_RDONLY)
    try:
        try_until = time() + timeout
        while True:
            try:
                flock(fd, LOCK_EX | LOCK_NB)
                break
            except IOError as err:
                if err.errno != EAGAIN:
                    raise
            sleep(0.01)
            if time() >= try_until:
                raise LockPathTimeout(
                    'Timeout %ds trying to lock %r.' % (timeout, path))
        yield True
Example #23
 def run(self):
     _logger.debug("mail : start")
     tmpFileList = glob.glob('%s/mail_*' % panda_config.logdir)
     for tmpFile in tmpFileList:
         # check timestamp to avoid too new files
         timeStamp = os.path.getmtime(tmpFile)
         if datetime.datetime.utcnow() - datetime.datetime.fromtimestamp(timeStamp) < datetime.timedelta(minutes=1):
             continue
         # lock
         mailFile = open(tmpFile)
         try:
             fcntl.flock(mailFile.fileno(), fcntl.LOCK_EX|fcntl.LOCK_NB)
          except IOError:
             _logger.debug("mail : failed to lock %s" % tmpFile.split('/')[-1])
             mailFile.close()
             continue
         # start notifier
         from dataservice.Notifier import Notifier
         nThr = Notifier(None,None,None,None,mailFile,tmpFile)
         nThr.run()
         # remove
         try:
             os.remove(tmpFile)
          except OSError:
             pass
         # unlock
         try:
              fcntl.flock(mailFile.fileno(), fcntl.LOCK_UN)  # unlock the same handle we locked above
             mailFile.close()
          except Exception:
             pass
Example #24
def lock_file(fd, flags):
    """lock file. """
    try:
        fcntl.flock(fd, flags)
        return (True, 0)
    except IOError as ex_value:
        return (False, ex_value.errno)
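Paired with unlock_file from Example #27, callers can attempt a non-blocking lock and branch on the returned errno; a hedged sketch (fd is any descriptor from os.open):

ok, err = lock_file(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
if not ok:
    print("lock busy, errno=%d" % err)
else:
    try:
        pass  # critical section
    finally:
        unlock_file(fd)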
Example #25
    def _update_suite_index(self, repository):
        """Updates the Release file in the suite."""
        path = os.path.join(
            utils.get_path_from_url(repository.url),
            "dists", repository.section[0]
        )
        release_path = os.path.join(path, "Release")
        self.logger.info(
            "added repository suite release file: %s", release_path
        )
        with open(release_path, "a+b") as fd:
            fcntl.flock(fd.fileno(), fcntl.LOCK_EX)
            try:
                fd.seek(0)
                release = deb822.Release(fd)
                self._add_to_release(release, repository)
                for m in _CHECKSUM_METHODS:
                    release.setdefault(m, [])

                self._add_files_to_release(
                    release, path, self._get_metafiles(repository)
                )

                fd.truncate(0)
                release.dump(fd)
            finally:
                fcntl.flock(fd.fileno(), fcntl.LOCK_UN)
Example #26
def lockfile(name, shared=False):
    """
    lockfile: take out a file-based lock
    :param name: name of file
    :param shared: take out a shared, rather than exclusive, lock (default: False)
    :return: object to pass to unlockfile
    """
    dirname = os.path.dirname(name)
    if not os.path.exists(dirname):
        os.makedirs(dirname)
    f = None
    while True:
        # noinspection PyBroadException
        try:
            f = open(name, 'a+')
            fno = f.fileno()
            fcntl.flock(fno, fcntl.LOCK_SH if shared else fcntl.LOCK_EX)
            stat1 = os.fstat(fno)
            if os.path.exists(f.name):
                stat2 = os.stat(f.name)
                if stat1.st_ino == stat2.st_ino:
                    # Same inode: the path was not unlinked or replaced between
                    # open() and flock(), so the lock we hold is still valid.
                    return f
            f.close()
        except Exception:
            # noinspection PyBroadException
            try:
                f.close()
            except Exception:
                pass
            pass
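The docstring mentions an unlockfile counterpart that is not shown; a minimal sketch of what it plausibly does (an assumption, not the project's actual code):

def unlockfile(f):
    # Release the advisory lock taken by lockfile() and close the handle.
    fcntl.flock(f.fileno(), fcntl.LOCK_UN)
    f.close()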
Example #27
def unlock_file(fd):
    """unlock file. """
    try:
        fcntl.flock(fd, fcntl.LOCK_UN)
        return (True, 0)
    except IOError as ex_value:
        return (False, ex_value.errno)
Example #28
def my_lock(lockname):
    # lockname is 'inner'
    lockdir = os.environ['AUTODIR']
    lockname = os.path.join(lockdir, '.cpuset.lock.'+lockname)
    lockfile = open(lockname, 'w')
    fcntl.flock(lockfile, fcntl.LOCK_EX)
    return lockfile
Example #29
def get_lock():
    """Gets a file lock, if necessary, and using params determined by flags.

    If we cannot obtain the lock, exit. (Either successfully or unsuccessfully,
    depending on flags.)
    """
    fh = None
    # We don't do anything unless --synchronous_name is set.
    if args.synchronous_name is not None:
        if not os.path.isdir(args.synchronization_dir):
            log('--synchronization_dir does not exist, attempting to create')
            os.mkdir(args.synchronization_dir)

        lock = os.path.join(args.synchronization_dir, args.synchronous_name)
        fh = open(lock, 'w')
        log('Acquiring lock on %s' % lock)
        if args.nonblocking:
            try:
                fcntl.flock(fh, fcntl.LOCK_EX | fcntl.LOCK_NB)
            except IOError:
                log('We did not get the lock but --nonblocking is true; '
                        'exiting successfully')
                fh.close()
                sys.exit(0)
        else:
            # Wait indefinitely. Hopefully there is a timeout on the synchro.py
            # holding the lock.
            fcntl.flock(fh, fcntl.LOCK_EX)
        log('Lock acquired')
    return fh
Example #30
    def lock(self):
        if not os.path.exists(os.path.dirname(self.lock_file)):
            os.makedirs(os.path.dirname(self.lock_file))
        self.log_debug("Attempting to lock {0}..."
                 .format(self.lock_file))

        self.lock_fd = open(self.lock_file, 'w')

        try:
            fcntl.flock(self.lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError as exc_value:
            self.lock_fd.close()
            # IOError: [Errno 11] Resource temporarily unavailable
            if exc_value.errno == errno.EAGAIN:
                with open(self.pid_file, 'r') as pid_fd:
                    self._lock_failure(pid_fd.read())
            else:
                raise

        self.log_debug("Lock acquired")

        try:
            with open(self.pid_file, 'w') as pid_fd:
                pid_fd.write(str(os.getpid()))
        except IOError as exc_value:
            # IOError: [Errno 2] No such file or directory
            if exc_value.errno == errno.ENOENT:
                raise Exception("Failed to acquire lock on {0}, but the "
                                "the process that has it hasn't written "
                                "the PID file {1} yet."
                                .format(self.lock_file, self.pid_file))
            else:
                raise
Example #31
 def unlock(self):
     fcntl.flock(self.fd, fcntl.LOCK_UN)  # raises on failure; the return value is always None, so asserting on it was vacuous
Example #32
 def ExecFlock(self, lockfile, *cmd_list):
   """Emulates the most basic behavior of Linux's flock(1)."""
   # Rely on exception handling to report errors.
   fd = os.open(lockfile, os.O_RDONLY|os.O_NOCTTY|os.O_CREAT, 0o666)
   fcntl.flock(fd, fcntl.LOCK_EX)
   return subprocess.call(cmd_list)
Example #33
def generate_json_common_c(out):
    with open(os.path.join(out, 'json_common.c'), "w") as source_file:
        fcntl.flock(source_file, fcntl.LOCK_EX)
        source_file.write("""// Auto generated file. Do not edit!
# define _GNU_SOURCE
# include <stdio.h>
# include <string.h>
# include <errno.h>
# include <limits.h>
# include "json_common.h"

# define MAX_NUM_STR_LEN 21

yajl_gen_status
map_uint (void *ctx, long long unsigned int num)
{
  char numstr[MAX_NUM_STR_LEN];
  int ret;

  ret = snprintf (numstr, sizeof (numstr), "%llu", num);
  if (ret < 0 || (size_t) ret >= sizeof (numstr))
    return yajl_gen_in_error_state;
  return yajl_gen_number ((yajl_gen) ctx, (const char *) numstr,
			  strlen (numstr));
}

yajl_gen_status
map_int (void *ctx, long long int num)
{
  char numstr[MAX_NUM_STR_LEN];
  int ret;

  ret = snprintf (numstr, sizeof (numstr), "%lld", num);
  if (ret < 0 || (size_t) ret >= sizeof (numstr))
    return yajl_gen_in_error_state;
  return yajl_gen_number ((yajl_gen) ctx, (const char *) numstr,
			  strlen (numstr));
}


bool
json_gen_init (yajl_gen * g, const struct parser_context *ctx)
{
  *g = yajl_gen_alloc (NULL);
  if (NULL == *g)
    return false;

  yajl_gen_config (*g, yajl_gen_beautify,
		   (int) (!(ctx->options & OPT_GEN_SIMPLIFY)));
  yajl_gen_config (*g, yajl_gen_validate_utf8,
		   (int) (!(ctx->options & OPT_GEN_NO_VALIDATE_UTF8)));
  return true;
}

yajl_val
get_val (yajl_val tree, const char *name, yajl_type type)
{
  const char *path[] = { name, NULL };
  return yajl_tree_get (tree, path, type);
}

int
common_safe_double (const char *numstr, double *converted)
{
  char *err_str = NULL;
  double d;

  if (numstr == NULL)
    return -EINVAL;

  errno = 0;
  d = strtod (numstr, &err_str);
  if (errno > 0)
    return -errno;


  if (err_str == NULL || err_str == numstr || *err_str != '\\0')
    return -EINVAL;

  *converted = d;
  return 0;
}

int
common_safe_uint8 (const char *numstr, uint8_t * converted)
{
  char *err = NULL;
  unsigned long int uli;

  if (numstr == NULL)
    return -EINVAL;

  errno = 0;
  uli = strtoul (numstr, &err, 0);
  if (errno > 0)
    return -errno;

  if (err == NULL || err == numstr || *err != '\\0')
    return -EINVAL;

  if (uli > UINT8_MAX)
    return -ERANGE;

  *converted = (uint8_t) uli;
  return 0;
}

int
common_safe_uint16 (const char *numstr, uint16_t * converted)
{
  char *err = NULL;
  unsigned long int uli;

  if (numstr == NULL)
    return -EINVAL;

  errno = 0;
  uli = strtoul (numstr, &err, 0);
  if (errno > 0)
    return -errno;

  if (err == NULL || err == numstr || *err != '\\0')
    return -EINVAL;

  if (uli > UINT16_MAX)
    return -ERANGE;

  *converted = (uint16_t) uli;
  return 0;
}

int
common_safe_uint32 (const char *numstr, uint32_t * converted)
{
  char *err = NULL;
  unsigned long long int ull;

  if (numstr == NULL)
    return -EINVAL;

  errno = 0;
  ull = strtoull (numstr, &err, 0);
  if (errno > 0)
    return -errno;

  if (err == NULL || err == numstr || *err != '\\0')
    return -EINVAL;

  if (ull > UINT32_MAX)
    return -ERANGE;

  *converted = (uint32_t) ull;
  return 0;
}

int
common_safe_uint64 (const char *numstr, uint64_t * converted)
{
  char *err = NULL;
  unsigned long long int ull;

  if (numstr == NULL)
    return -EINVAL;

  errno = 0;
  ull = strtoull (numstr, &err, 0);
  if (errno > 0)
    return -errno;

  if (err == NULL || err == numstr || *err != '\\0')
    return -EINVAL;

  *converted = (uint64_t) ull;
  return 0;
}

int
common_safe_uint (const char *numstr, unsigned int *converted)
{
  char *err = NULL;
  unsigned long long int ull;

  if (numstr == NULL)
    return -EINVAL;

  errno = 0;
  ull = strtoull (numstr, &err, 0);
  if (errno > 0)
    return -errno;

  if (err == NULL || err == numstr || *err != '\\0')
    return -EINVAL;

  if (ull > UINT_MAX)
    return -ERANGE;

  *converted = (unsigned int) ull;
  return 0;
}

int
common_safe_int8 (const char *numstr, int8_t * converted)
{
  char *err = NULL;
  long int li;

  if (numstr == NULL)
    {
      return -EINVAL;
    }

  errno = 0;
  li = strtol (numstr, &err, 0);
  if (errno > 0)
    return -errno;

  if (err == NULL || err == numstr || *err != '\\0')
    return -EINVAL;

  if (li > INT8_MAX || li < INT8_MIN)
    return -ERANGE;

  *converted = (int8_t) li;
  return 0;
}

int
common_safe_int16 (const char *numstr, int16_t * converted)
{
  char *err = NULL;
  long int li;

  if (numstr == NULL)
    return -EINVAL;

  errno = 0;
  li = strtol (numstr, &err, 0);
  if (errno > 0)
    return -errno;

  if (err == NULL || err == numstr || *err != '\\0')
    return -EINVAL;

  if (li > INT16_MAX || li < INT16_MIN)
    return -ERANGE;

  *converted = (int16_t) li;
  return 0;
}

int
common_safe_int32 (const char *numstr, int32_t * converted)
{
  char *err = NULL;
  long long int lli;

  if (numstr == NULL)
    return -EINVAL;

  errno = 0;
  lli = strtol (numstr, &err, 0);
  if (errno > 0)
    return -errno;

  if (err == NULL || err == numstr || *err != '\\0')
    return -EINVAL;

  if (lli > INT32_MAX || lli < INT32_MIN)

    return -ERANGE;

  *converted = (int32_t) lli;
  return 0;
}

int
common_safe_int64 (const char *numstr, int64_t * converted)
{
  char *err = NULL;
  long long int lli;

  if (numstr == NULL)
    return -EINVAL;

  errno = 0;
  lli = strtoll (numstr, &err, 0);
  if (errno > 0)
    return -errno;

  if (err == NULL || err == numstr || *err != '\\0')
    return -EINVAL;

  *converted = (int64_t) lli;
  return 0;
}

int
common_safe_int (const char *numstr, int *converted)
{
  char *err = NULL;
  long long int lli;

  if (numstr == NULL)
    return -EINVAL;

  errno = 0;
  lli = strtol (numstr, &err, 0);
  if (errno > 0)
    return -errno;

  if (err == NULL || err == numstr || *err != '\\0')
    return -EINVAL;

  if (lli > INT_MAX || lli < INT_MIN)
    return -ERANGE;

  *converted = (int) lli;
  return 0;
}

yajl_gen_status
gen_json_map_int_int (void *ctx, const json_map_int_int * map,
		      const struct parser_context *ptx, parser_error * err)
{
  yajl_gen_status stat = yajl_gen_status_ok;
  yajl_gen g = (yajl_gen) ctx;
  size_t len = 0, i = 0;
  if (map != NULL)
    len = map->len;
  if (!len && !(ptx->options & OPT_GEN_SIMPLIFY))
    yajl_gen_config (g, yajl_gen_beautify, 0);
  stat = yajl_gen_map_open ((yajl_gen) g);
  if (yajl_gen_status_ok != stat)
    GEN_SET_ERROR_AND_RETURN (stat, err);
  for (i = 0; i < len; i++)
    {
      char numstr[MAX_NUM_STR_LEN];
      int nret;
      nret =
	snprintf (numstr, sizeof (numstr), "%lld",
		  (long long int) map->keys[i]);
      if (nret < 0 || (size_t) nret >= sizeof (numstr))
	{
	  if (!*err && asprintf (err, "Error to print string") < 0)
	    *(err) = strdup ("error allocating memory");
	  return yajl_gen_in_error_state;
	}
      stat =
	yajl_gen_string ((yajl_gen) g, (const unsigned char *) numstr,
			 strlen (numstr));
      if (yajl_gen_status_ok != stat)
	GEN_SET_ERROR_AND_RETURN (stat, err);
      stat = map_int (g, map->values[i]);
      if (yajl_gen_status_ok != stat)
	GEN_SET_ERROR_AND_RETURN (stat, err);
    }

  stat = yajl_gen_map_close ((yajl_gen) g);
  if (yajl_gen_status_ok != stat)
    GEN_SET_ERROR_AND_RETURN (stat, err);
  if (!len && !(ptx->options & OPT_GEN_SIMPLIFY))
    yajl_gen_config (g, yajl_gen_beautify, 1);
  return yajl_gen_status_ok;
}

void
free_json_map_int_int (json_map_int_int * map)
{
  if (map != NULL)
    {
      free (map->keys);
      map->keys = NULL;
      free (map->values);
      map->values = NULL;
      free (map);
    }
}

json_map_int_int *
make_json_map_int_int (yajl_val src, const struct parser_context *ctx,
		       parser_error * err)
{
  (void) ctx;			/* Silence compiler warning.  */
  json_map_int_int *ret = NULL;
  if (src != NULL && YAJL_GET_OBJECT (src) != NULL)
    {
      size_t i;
      size_t len = YAJL_GET_OBJECT (src)->len;
      ret = calloc (1, sizeof (*ret));
      if (ret == NULL)
        return NULL;
      ret->len = len;
      ret->keys = calloc (1, (len + 1) * sizeof (int));
      if (ret->keys == NULL)
        {
          free (ret);
          return NULL;
        }
      ret->values = calloc (1, (len + 1) * sizeof (int));
      if (ret->values == NULL)
        {
          free (ret->keys);
          free (ret);
          return NULL;
        }
      for (i = 0; i < len; i++)
	{
	  const char *srckey = YAJL_GET_OBJECT (src)->keys[i];
	  yajl_val srcval = YAJL_GET_OBJECT (src)->values[i];

	  if (srckey != NULL)
	    {
	      int invalid;
	      invalid = common_safe_int (srckey, &(ret->keys[i]));
	      if (invalid)
		{
		  if (*err == NULL
		      && asprintf (err,
				   "Invalid key '%s' with type 'int': %s",
				   srckey, strerror (-invalid)) < 0)
		    *(err) = strdup ("error allocating memory");
		  free_json_map_int_int (ret);
		  return NULL;
		}
	    }

	  if (srcval != NULL)
	    {
	      int invalid;
	      if (!YAJL_IS_NUMBER (srcval))
		{
		  if (*err == NULL
		      && asprintf (err,
				   "Invalid value with type 'int' for key '%s'",
				   srckey) < 0)
		    *(err) = strdup ("error allocating memory");
		  free_json_map_int_int (ret);
		  return NULL;
		}
	      invalid =
		common_safe_int (YAJL_GET_NUMBER (srcval), &(ret->values[i]));
	      if (invalid)
		{
		  if (*err == NULL
		      && asprintf (err,
				   "Invalid value with type 'int' for key '%s': %s",
				   srckey, strerror (-invalid)) < 0)
		    *(err) = strdup ("error allocating memory");
		  free_json_map_int_int (ret);
		  return NULL;
		}
	    }
	}
    }
  return ret;
}

int
append_json_map_int_int (json_map_int_int * map, int key, int val)
{
  size_t len;
  int *keys = NULL;
  int *vals = NULL;

  if (map == NULL)
    return -1;

  if ((SIZE_MAX / sizeof (int) - 1) < map->len)
    return -1;

  len = map->len + 1;
  keys = calloc (1, len * sizeof (int));
  if (keys == NULL)
    return -1;
  vals = calloc (1, len * sizeof (int));
  if (vals == NULL)
    {
      free (keys);
      return -1;
    }

  if (map->len)
    {
      (void) memcpy (keys, map->keys, map->len * sizeof (int));
      (void) memcpy (vals, map->values, map->len * sizeof (int));
    }
  free (map->keys);
  map->keys = keys;
  free (map->values);
  map->values = vals;
  map->keys[map->len] = key;
  map->values[map->len] = val;

  map->len++;
  return 0;
}

yajl_gen_status
gen_json_map_int_bool (void *ctx, const json_map_int_bool * map,
		       const struct parser_context *ptx, parser_error * err)
{
  yajl_gen_status stat = yajl_gen_status_ok;
  yajl_gen g = (yajl_gen) ctx;
  size_t len = 0, i = 0;
  if (map != NULL)
    len = map->len;
  if (!len && !(ptx->options & OPT_GEN_SIMPLIFY))
    yajl_gen_config (g, yajl_gen_beautify, 0);
  stat = yajl_gen_map_open ((yajl_gen) g);
  if (yajl_gen_status_ok != stat)
    GEN_SET_ERROR_AND_RETURN (stat, err);
  for (i = 0; i < len; i++)
    {
      char numstr[MAX_NUM_STR_LEN];
      int nret;
      nret =
	snprintf (numstr, sizeof (numstr), "%lld",
		  (long long int) map->keys[i]);
      if (nret < 0 || (size_t) nret >= sizeof (numstr))
	{
	  if (!*err && asprintf (err, "Error to print string") < 0)
	    *(err) = strdup ("error allocating memory");
	  return yajl_gen_in_error_state;
	}
      stat =
	yajl_gen_string ((yajl_gen) g, (const unsigned char *) numstr,
			 strlen (numstr));
      if (yajl_gen_status_ok != stat)
	GEN_SET_ERROR_AND_RETURN (stat, err);
      stat = yajl_gen_bool ((yajl_gen) g, (int) (map->values[i]));
      if (yajl_gen_status_ok != stat)
	GEN_SET_ERROR_AND_RETURN (stat, err);
    }

  stat = yajl_gen_map_close ((yajl_gen) g);
  if (yajl_gen_status_ok != stat)
    GEN_SET_ERROR_AND_RETURN (stat, err);
  if (!len && !(ptx->options & OPT_GEN_SIMPLIFY))
    yajl_gen_config (g, yajl_gen_beautify, 1);
  return yajl_gen_status_ok;
}

void
free_json_map_int_bool (json_map_int_bool * map)
{
  if (map != NULL)
    {
      size_t i;
      for (i = 0; i < map->len; i++)
	{
	  // No need to free key for type int
	  // No need to free value for type bool
	}
      free (map->keys);
      map->keys = NULL;
      free (map->values);
      map->values = NULL;
      free (map);
    }
}

json_map_int_bool *
make_json_map_int_bool (yajl_val src, const struct parser_context *ctx,
			parser_error * err)
{
  (void) ctx;			/* Silence compiler warning.  */
  json_map_int_bool *ret = NULL;
  if (src != NULL && YAJL_GET_OBJECT (src) != NULL)
    {
      size_t i;
      size_t len = YAJL_GET_OBJECT (src)->len;
      ret = calloc (1, sizeof (*ret));
      if (ret == NULL)
        return NULL;
      ret->len = len;
      ret->keys = calloc (1, (len + 1) * sizeof (int));
      if (ret->keys == NULL)
        {
          free (ret);
          return NULL;
        }
      ret->values = calloc (1, (len + 1) * sizeof (bool));
      if (ret->values == NULL)
        {
          free (ret->keys);
          free (ret);
          return NULL;
        }
      for (i = 0; i < len; i++)
	{
	  const char *srckey = YAJL_GET_OBJECT (src)->keys[i];
	  yajl_val srcval = YAJL_GET_OBJECT (src)->values[i];

	  if (srckey != NULL)
	    {
	      int invalid;
	      invalid = common_safe_int (srckey, &(ret->keys[i]));
	      if (invalid)
		{
		  if (*err == NULL
		      && asprintf (err,
				   "Invalid key '%s' with type 'int': %s",
				   srckey, strerror (-invalid)) < 0)
		    *(err) = strdup ("error allocating memory");
		  free_json_map_int_bool (ret);
		  return NULL;
		}
	    }

	  if (srcval != NULL)
	    {
	      if (YAJL_IS_TRUE (srcval))
		ret->values[i] = true;
	      else if (YAJL_IS_FALSE (srcval))
		ret->values[i] = false;
	      else
		{
		  if (*err == NULL
		      && asprintf (err,
				   "Invalid value with type 'bool' for key '%s'",
				   srckey) < 0)
		    {
		      *(err) = strdup ("error allocating memory");
		    }
		  free_json_map_int_bool (ret);
		  return NULL;
		}
	    }
	}
    }
  return ret;
}

int
append_json_map_int_bool (json_map_int_bool * map, int key, bool val)
{
  size_t len;
  int *keys = NULL;
  bool *vals = NULL;

  if (map == NULL)
    return -1;

  if ((SIZE_MAX / sizeof (int) - 1) < map->len
      || (SIZE_MAX / sizeof (bool) - 1) < map->len)
    return -1;

  len = map->len + 1;
  keys = calloc (1, len * sizeof (int));
  if (keys == NULL)
    return -1;
  vals = calloc (1, len * sizeof (bool));
  if (vals == NULL)
    {
      free (keys);
      return -1;
    }

  if (map->len)
    {
      (void) memcpy (keys, map->keys, map->len * sizeof (int));
      (void) memcpy (vals, map->values, map->len * sizeof (bool));
    }
  free (map->keys);
  map->keys = keys;
  free (map->values);
  map->values = vals;
  map->keys[map->len] = key;
  map->values[map->len] = val;

  map->len++;
  return 0;
}

yajl_gen_status
gen_json_map_int_string (void *ctx, const json_map_int_string * map,
			 const struct parser_context *ptx, parser_error * err)
{
  yajl_gen_status stat = yajl_gen_status_ok;
  yajl_gen g = (yajl_gen) ctx;
  size_t len = 0, i = 0;
  if (map != NULL)
    len = map->len;
  if (!len && !(ptx->options & OPT_GEN_SIMPLIFY))
    yajl_gen_config (g, yajl_gen_beautify, 0);

  stat = yajl_gen_map_open ((yajl_gen) g);
  if (yajl_gen_status_ok != stat)
    GEN_SET_ERROR_AND_RETURN (stat, err);
  for (i = 0; i < len; i++)
    {
      char numstr[MAX_NUM_STR_LEN];
      int nret;
      nret =
	snprintf (numstr, sizeof (numstr), "%lld",
		  (long long int) map->keys[i]);
      if (nret < 0 || (size_t) nret >= sizeof (numstr))
	{
	  if (!*err && asprintf (err, "Error to print string") < 0)
	    *(err) = strdup ("error allocating memory");
	  return yajl_gen_in_error_state;
	}
      stat =
	yajl_gen_string ((yajl_gen) g, (const unsigned char *) numstr,
			 strlen (numstr));
      if (yajl_gen_status_ok != stat)
	GEN_SET_ERROR_AND_RETURN (stat, err);
      stat =
	yajl_gen_string ((yajl_gen) g,
			 (const unsigned char *) (map->values[i]),
			 strlen (map->values[i]));
      if (yajl_gen_status_ok != stat)
	GEN_SET_ERROR_AND_RETURN (stat, err);
    }

  stat = yajl_gen_map_close ((yajl_gen) g);
  if (yajl_gen_status_ok != stat)
    GEN_SET_ERROR_AND_RETURN (stat, err);
  if (!len && !(ptx->options & OPT_GEN_SIMPLIFY))
    yajl_gen_config (g, yajl_gen_beautify, 1);
  return yajl_gen_status_ok;
}

void
free_json_map_int_string (json_map_int_string * map)
{
  if (map != NULL)
    {
      size_t i;
      for (i = 0; i < map->len; i++)
	{
	  // No need to free key for type int
	  free (map->values[i]);
	  map->values[i] = NULL;
	}
      free (map->keys);
      map->keys = NULL;
      free (map->values);
      map->values = NULL;
      free (map);
    }
}

json_map_int_string *
make_json_map_int_string (yajl_val src, const struct parser_context *ctx,
			  parser_error * err)
{
  (void) ctx;			/* Silence compiler warning.  */
  json_map_int_string *ret = NULL;
  if (src != NULL && YAJL_GET_OBJECT (src) != NULL)
    {
      size_t i;
      size_t len = YAJL_GET_OBJECT (src)->len;
      ret = calloc (1, sizeof (*ret));
      if (ret == NULL)
        return NULL;

      ret->len = len;
      ret->keys = calloc (1, (len + 1) * sizeof (int));
      if (ret->keys == NULL)
        {
          free (ret);
          return NULL;
        }
      ret->values = calloc (1, (len + 1) * sizeof (char *));
      if (ret->values == NULL)
        {
          free (ret->keys);
          free (ret);
          return NULL;
        }
      for (i = 0; i < len; i++)
	{
	  const char *srckey = YAJL_GET_OBJECT (src)->keys[i];
	  yajl_val srcval = YAJL_GET_OBJECT (src)->values[i];

	  if (srckey != NULL)
	    {
	      int invalid;
	      invalid = common_safe_int (srckey, &(ret->keys[i]));
	      if (invalid)
		{
		  if (*err == NULL
		      && asprintf (err,
				   "Invalid key '%s' with type 'int': %s",
				   srckey, strerror (-invalid)) < 0)
		    *(err) = strdup ("error allocating memory");
		  free_json_map_int_string (ret);
		  return NULL;
		}
	    }

	  if (srcval != NULL)
	    {
	      if (!YAJL_IS_STRING (srcval))
		{
		  if (*err == NULL
		      && asprintf (err,
				   "Invalid value with type 'string' for key '%s'",
				   srckey) < 0)
		    *(err) = strdup ("error allocating memory");
		  free_json_map_int_string (ret);
		  return NULL;
		}
	      char *str = YAJL_GET_STRING (srcval);
	      ret->values[i] = strdup (str ? str : "");
	    }
	}
    }
  return ret;
}

int
append_json_map_int_string (json_map_int_string * map, int key,
			    const char *val)
{
  size_t len;
  int *keys = NULL;
  char **vals = NULL;
  char *new_value;

  if (map == NULL)
    return -1;

  if ((SIZE_MAX / sizeof (int) - 1) < map->len
      || (SIZE_MAX / sizeof (char *) - 1) < map->len)
    return -1;

  len = map->len + 1;
  keys = calloc (1, len * sizeof (int));
  if (keys == NULL)
    {
      free (keys);
      return -1;
    }
  vals = calloc (1, len * sizeof (char *));
  if (vals == NULL)
    {
      free (keys);
      return -1;
    }

  new_value = strdup (val ? val : "");
  if (new_value == NULL)
    {
      free (keys);
      free (vals);
      return -1;
    }

  if (map->len)
    {
      (void) memcpy (keys, map->keys, map->len * sizeof (int));
      (void) memcpy (vals, map->values, map->len * sizeof (char *));
    }
  free (map->keys);
  map->keys = keys;
  free (map->values);
  map->values = vals;
  map->keys[map->len] = key;
  map->values[map->len] = new_value;
  map->len++;
  return 0;
}

yajl_gen_status
gen_json_map_string_int (void *ctx, const json_map_string_int * map,
			 const struct parser_context *ptx, parser_error * err)
{
  yajl_gen_status stat = yajl_gen_status_ok;
  yajl_gen g = (yajl_gen) ctx;
  size_t len = 0, i = 0;
  if (map != NULL)
    len = map->len;
  if (!len && !(ptx->options & OPT_GEN_SIMPLIFY))
    yajl_gen_config (g, yajl_gen_beautify, 0);
  stat = yajl_gen_map_open ((yajl_gen) g);
  if (yajl_gen_status_ok != stat)
    GEN_SET_ERROR_AND_RETURN (stat, err);
  for (i = 0; i < len; i++)
    {
      stat =
	yajl_gen_string ((yajl_gen) g, (const unsigned char *) (map->keys[i]),
			 strlen (map->keys[i]));
      if (yajl_gen_status_ok != stat)
	GEN_SET_ERROR_AND_RETURN (stat, err);
      stat = map_int (g, map->values[i]);
      if (yajl_gen_status_ok != stat)
	GEN_SET_ERROR_AND_RETURN (stat, err);
    }

  stat = yajl_gen_map_close ((yajl_gen) g);
  if (yajl_gen_status_ok != stat)
    GEN_SET_ERROR_AND_RETURN (stat, err);
  if (!len && !(ptx->options & OPT_GEN_SIMPLIFY))
    yajl_gen_config (g, yajl_gen_beautify, 1);
  return yajl_gen_status_ok;
}

void
free_json_map_string_int (json_map_string_int * map)
{
  if (map != NULL)
    {
      size_t i;
      for (i = 0; i < map->len; i++)
	{
	  free (map->keys[i]);
	  map->keys[i] = NULL;
	}
      free (map->keys);
      map->keys = NULL;
      free (map->values);
      map->values = NULL;
      free (map);
    }
}

json_map_string_int *
make_json_map_string_int (yajl_val src, const struct parser_context *ctx,
			  parser_error * err)
{
  (void) ctx;			/* Silence compiler warning.  */
  json_map_string_int *ret = NULL;
  if (src != NULL && YAJL_GET_OBJECT (src) != NULL)
    {
      size_t i;
      size_t len = YAJL_GET_OBJECT (src)->len;
      ret = calloc (1, sizeof (*ret));
      if (ret == NULL)
        {
          *(err) = strdup ("error allocating memory");
          return NULL;
        }
      ret->len = len;
      ret->keys = calloc (1, (len + 1) * sizeof (char *));
      if (ret->keys == NULL)
        {
          *(err) = strdup ("error allocating memory");
          free (ret);
          return NULL;
        }
      ret->values = calloc (1, (len + 1) * sizeof (int));
      if (ret->values == NULL)
        {
          *(err) = strdup ("error allocating memory");
          free (ret->keys);
          free (ret);
          return NULL;
        }
      for (i = 0; i < len; i++)
	{
	  const char *srckey = YAJL_GET_OBJECT (src)->keys[i];
	  yajl_val srcval = YAJL_GET_OBJECT (src)->values[i];
	  ret->keys[i] = strdup (srckey ? srckey : "");
          if (ret->keys[i] == NULL)
            {
              *(err) = strdup ("error allocating memory");
              free_json_map_string_int (ret);
              return NULL;
            }

	  if (srcval != NULL)
	    {
	      int invalid;
	      if (!YAJL_IS_NUMBER (srcval))
		{
		  if (*err == NULL
		      && asprintf (err,
				   "Invalid value with type 'int' for key '%s'",
				   srckey) < 0)
		    *(err) = strdup ("error allocating memory");
		  free_json_map_string_int (ret);
		  return NULL;
		}
	      invalid =
		common_safe_int (YAJL_GET_NUMBER (srcval), &(ret->values[i]));
	      if (invalid)
		{
		  if (*err == NULL
		      && asprintf (err,
				   "Invalid value with type 'int' for key '%s': %s",
				   srckey, strerror (-invalid)) < 0)
		    *(err) = strdup ("error allocating memory");
		  free_json_map_string_int (ret);
		  return NULL;
		}
	    }
	}
    }
  return ret;
}

int
append_json_map_string_int (json_map_string_int * map, const char *key,
			    int val)
{
  size_t len;
  char **keys = NULL;
  int *vals = NULL;
  char *new_value;

  if (map == NULL)
    return -1;

  if ((SIZE_MAX / sizeof (char *) - 1) < map->len
      || (SIZE_MAX / sizeof (int) - 1) < map->len)
    return -1;

  len = map->len + 1;
  keys = calloc (1, len * sizeof (char *));
  if (keys == NULL)
    return -1;
  vals = calloc (1, len * sizeof (int));
  if (vals == NULL)
    {
      free (keys);
      return -1;
    }
  new_value = strdup (key ? key : "");
  if (new_value == NULL)
    {
      free (vals);
      free (keys);
      return -1;
    }

  if (map->len)
    {
      (void) memcpy (keys, map->keys, map->len * sizeof (char *));
      (void) memcpy (vals, map->values, map->len * sizeof (int));
    }
  free (map->keys);
  map->keys = keys;
  free (map->values);
  map->values = vals;
  map->keys[map->len] = new_value;
  map->values[map->len] = val;

  map->len++;
  return 0;
}

yajl_gen_status
gen_json_map_string_bool (void *ctx, const json_map_string_bool * map,
			  const struct parser_context *ptx,
			  parser_error * err)
{
  yajl_gen_status stat = yajl_gen_status_ok;
  yajl_gen g = (yajl_gen) ctx;
  size_t len = 0, i = 0;
  if (map != NULL)
    len = map->len;
  if (!len && !(ptx->options & OPT_GEN_SIMPLIFY))
    yajl_gen_config (g, yajl_gen_beautify, 0);
  stat = yajl_gen_map_open ((yajl_gen) g);
  if (yajl_gen_status_ok != stat)
    GEN_SET_ERROR_AND_RETURN (stat, err);
  for (i = 0; i < len; i++)
    {
      stat =
	yajl_gen_string ((yajl_gen) g, (const unsigned char *) (map->keys[i]),
			 strlen (map->keys[i]));
      if (yajl_gen_status_ok != stat)
	GEN_SET_ERROR_AND_RETURN (stat, err);
      stat = yajl_gen_bool ((yajl_gen) g, (int) (map->values[i]));
      if (yajl_gen_status_ok != stat)
	GEN_SET_ERROR_AND_RETURN (stat, err);
    }

  stat = yajl_gen_map_close ((yajl_gen) g);
  if (yajl_gen_status_ok != stat)
    GEN_SET_ERROR_AND_RETURN (stat, err);
  if (!len && !(ptx->options & OPT_GEN_SIMPLIFY))
    yajl_gen_config (g, yajl_gen_beautify, 1);
  return yajl_gen_status_ok;
}

void
free_json_map_string_bool (json_map_string_bool * map)
{
  if (map != NULL)
    {
      size_t i;
      for (i = 0; i < map->len; i++)
	{
	  free (map->keys[i]);
	  map->keys[i] = NULL;
	  // No need to free value for type bool
	}
      free (map->keys);
      map->keys = NULL;
      free (map->values);
      map->values = NULL;
      free (map);
    }
}

json_map_string_bool *
make_json_map_string_bool (yajl_val src, const struct parser_context *ctx,
			   parser_error * err)
{
  (void) ctx;			/* Silence compiler warning.  */
  json_map_string_bool *ret = NULL;
  if (src != NULL && YAJL_GET_OBJECT (src) != NULL)
    {
      size_t i;
      size_t len = YAJL_GET_OBJECT (src)->len;
      ret = calloc (1, sizeof (*ret));
      if (ret == NULL)
        return NULL;
      ret->len = len;
      ret->keys = calloc (1, (len + 1) * sizeof (char *));
      if (ret->keys == NULL)
        {
          free (ret);
          return NULL;
        }
      ret->values = calloc (1, (len + 1) * sizeof (bool));
      if (ret->values == NULL)
        {
          free (ret->keys);
          free (ret);
          return NULL;
        }
      for (i = 0; i < len; i++)
	{
	  const char *srckey = YAJL_GET_OBJECT (src)->keys[i];
	  yajl_val srcval = YAJL_GET_OBJECT (src)->values[i];
	  ret->keys[i] = strdup (srckey ? srckey : "");
	  if (ret->keys[i] == NULL)
            {
              *(err) = strdup ("error allocating memory");
              free_json_map_string_bool (ret);
              return NULL;
            }
	  if (srcval != NULL)
	    {
	      if (YAJL_IS_TRUE (srcval))
		ret->values[i] = true;
	      else if (YAJL_IS_FALSE (srcval))
		ret->values[i] = false;
	      else
		{
		  if (*err == NULL
		      && asprintf (err,
				   "Invalid value with type 'bool' for key '%s'",
				   srckey) < 0)
		    *(err) = strdup ("error allocating memory");
		  free_json_map_string_bool (ret);
		  return NULL;
		}
	    }
	}
    }
  return ret;
}

int
append_json_map_string_bool (json_map_string_bool * map, const char *key,
			     bool val)
{
  size_t len;
  char **keys = NULL;
  bool *vals = NULL;
  char *new_value;

  if (map == NULL)
    return -1;

  if ((SIZE_MAX / sizeof (char *) - 1) < map->len
      || (SIZE_MAX / sizeof (bool) - 1) < map->len)
    return -1;

  len = map->len + 1;
  keys = calloc (1, len * sizeof (char *));
  if (keys == NULL)
    return -1;
  vals = calloc (1, len * sizeof (bool));
  if (vals == NULL)
    {
      free (keys);
      return -1;
    }

  new_value = strdup (key ? key : "");
  if (new_value == NULL)
    {
      free (vals);
      free (keys);
      return -1;
    }

  if (map->len)
    {
      (void) memcpy (keys, map->keys, map->len * sizeof (char *));
      (void) memcpy (vals, map->values, map->len * sizeof (bool));
    }
  free (map->keys);
  map->keys = keys;
  free (map->values);
  map->values = vals;
  map->keys[map->len] = new_value;
  map->values[map->len] = val;

  map->len++;
  return 0;
}

yajl_gen_status
gen_json_map_string_string (void *ctx, const json_map_string_string * map,
			    const struct parser_context *ptx,
			    parser_error * err)
{
  yajl_gen_status stat = yajl_gen_status_ok;
  yajl_gen g = (yajl_gen) ctx;
  size_t len = 0, i = 0;
  if (map != NULL)
    len = map->len;

  if (!len && !(ptx->options & OPT_GEN_SIMPLIFY))
    yajl_gen_config (g, yajl_gen_beautify, 0);

  stat = yajl_gen_map_open ((yajl_gen) g);
  if (yajl_gen_status_ok != stat)
    GEN_SET_ERROR_AND_RETURN (stat, err);

  for (i = 0; i < len; i++)
    {
      stat =
	yajl_gen_string ((yajl_gen) g, (const unsigned char *) (map->keys[i]),
			 strlen (map->keys[i]));
      if (yajl_gen_status_ok != stat)
	GEN_SET_ERROR_AND_RETURN (stat, err);
      stat =
	yajl_gen_string ((yajl_gen) g,
			 (const unsigned char *) (map->values[i]),
			 strlen (map->values[i]));
      if (yajl_gen_status_ok != stat)
	GEN_SET_ERROR_AND_RETURN (stat, err);
    }

  stat = yajl_gen_map_close ((yajl_gen) g);
  if (yajl_gen_status_ok != stat)
    GEN_SET_ERROR_AND_RETURN (stat, err);
  if (!len && !(ptx->options & OPT_GEN_SIMPLIFY))
    yajl_gen_config (g, yajl_gen_beautify, 1);
  return yajl_gen_status_ok;
}

void
free_json_map_string_string (json_map_string_string * map)
{
  if (map != NULL)
    {
      size_t i;
      for (i = 0; i < map->len; i++)
	{
	  free (map->keys[i]);
	  map->keys[i] = NULL;
	  free (map->values[i]);
	  map->values[i] = NULL;
	}
      free (map->keys);
      map->keys = NULL;
      free (map->values);
      map->values = NULL;
      free (map);
    }
}

json_map_string_string *
make_json_map_string_string (yajl_val src, const struct parser_context *ctx,
			     parser_error * err)
{
  (void) ctx;			/* Silence compiler warning.  */
  json_map_string_string *ret = NULL;
  if (src != NULL && YAJL_GET_OBJECT (src) != NULL)
    {
      size_t i;
      size_t len = YAJL_GET_OBJECT (src)->len;
      ret = malloc (sizeof (*ret));
      if (ret == NULL)
        {
          *(err) = strdup ("error allocating memory");
          return NULL;
        }
      ret->len = len;
      ret->keys = calloc (1, (len + 1) * sizeof (char *));
      if (ret->keys == NULL)
        {
          *(err) = strdup ("error allocating memory");
          free (ret);
          return NULL;
        }
      ret->values = calloc (1, (len + 1) * sizeof (char *));
      if (ret->values == NULL)
        {
          *(err) = strdup ("error allocating memory");
          free (ret->keys);
          free (ret);
          return NULL;
        }
      for (i = 0; i < len; i++)
	{
	  const char *srckey = YAJL_GET_OBJECT (src)->keys[i];
	  yajl_val srcval = YAJL_GET_OBJECT (src)->values[i];

	  ret->keys[i] = strdup (srckey ? srckey : "");
          if (ret->keys[i] == NULL)
            {
              free_json_map_string_string (ret);
              return NULL;
            }
	  if (srcval != NULL)
	    {
	      char *str;
	      if (!YAJL_IS_STRING (srcval))
		{
		  if (*err == NULL
		      && asprintf (err,
				   "Invalid value with type 'string' for key '%s'",
				   srckey) < 0)
		    *(err) = strdup ("error allocating memory");
		  free_json_map_string_string (ret);
		  return NULL;
		}

	      str = YAJL_GET_STRING (srcval);
	      ret->values[i] = strdup (str ? str : "");
              if (ret->values[i] == NULL)
                {
		  free_json_map_string_string (ret);
		  return NULL;
		}
	    }
	}
    }
  return ret;
}

int
append_json_map_string_string (json_map_string_string * map, const char *key,
			       const char *val)
{
  size_t len, i;
  char **keys = NULL;
  char **vals = NULL;
  char *new_key = NULL;
  char *new_value = NULL;

  if (map == NULL)
    return -1;

  for (i = 0; i < map->len; i++)
    {
      if (strcmp (map->keys[i], key) == 0)
	{
          char *v = strdup (val ? val : "");
          if (v == NULL)
            return -1;
	  free (map->values[i]);
	  map->values[i] = v;
	  return 0;
	}
    }

  if ((SIZE_MAX / sizeof (char *) - 1) < map->len)
    return -1;

  new_key = strdup (key ? key : "");
  if (new_key == NULL)
    goto cleanup;
  new_value = strdup (val ? val : "");
  if (new_value == NULL)
    goto cleanup;

  len = map->len + 1;
  keys = calloc (1, len * sizeof (char *));
  if (keys == NULL)
    goto cleanup;
  vals = calloc (1, len * sizeof (char *));
  if (vals == NULL)
    goto cleanup;

  if (map->len)
    {
      (void) memcpy (keys, map->keys, map->len * sizeof (char *));
      (void) memcpy (vals, map->values, map->len * sizeof (char *));
    }
  free (map->keys);
  map->keys = keys;
  free (map->values);
  map->values = vals;
  map->keys[map->len] = new_key;
  map->values[map->len] = new_value;

  map->len++;
  return 0;
 cleanup:

  free (keys);
  free (vals);
  free (new_key);
  free (new_value);
  return -1;
}

char *
json_marshal_string (const char *str, size_t length,
		     const struct parser_context *ctx, parser_error * err)
{
  yajl_gen g = NULL;
  struct parser_context tmp_ctx = { 0 };
  const unsigned char *gen_buf = NULL;
  char *json_buf = NULL;
  size_t gen_len = 0;
  yajl_gen_status stat;

  if (str == NULL || err == NULL)
    return NULL;

  *err = NULL;
  if (ctx == NULL)
    ctx = (const struct parser_context *) (&tmp_ctx);

  if (!json_gen_init (&g, ctx))
    {
      *err = strdup ("Json_gen init failed");
      goto out;
    }
  stat = yajl_gen_string ((yajl_gen) g, (const unsigned char *) str, length);
  if (yajl_gen_status_ok != stat)
    {
      if (asprintf (err, "error generating json, errcode: %d", (int) stat) <
	  0)
	{
	  *err = strdup ("error allocating memory");
	}
      goto free_out;
    }
  yajl_gen_get_buf (g, &gen_buf, &gen_len);
  if (gen_buf == NULL)
    {
      *err = strdup ("Error to get generated json");
      goto free_out;
    }

  json_buf = calloc (1, gen_len + 1);
  if (json_buf == NULL)
    {
      *err = strdup ("error allocating memory");
      goto free_out;
    }

  (void) memcpy (json_buf, gen_buf, gen_len);
  json_buf[gen_len] = '\\0';

free_out:
  yajl_gen_clear (g);
  yajl_gen_free (g);
out:
  return json_buf;
}
        """)
        fcntl.flock(source_file, fcntl.LOCK_UN)
Example #34
0
 def unlock(self, handle):
     """Unlocks lock"""
     fcntl.flock(handle, fcntl.LOCK_UN)
Example #35
0
 def try_lock(self, handle):
     """Tries to lock the file"""
     fcntl.flock(handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
Example #36
0
 def _unlock_file(self, fd):
     fcntl.flock(fd, fcntl.LOCK_UN)
Example #37
0
 def _lock_file_exclusively(self, fd):
     fcntl.flock(fd, fcntl.LOCK_EX)
Example #38
0
    def flock(  # pyre-fixme
        self,
        filename: str,
        mode: str = "",
        exclusive: bool = False,
        wait: bool = False,
        update_atime: bool = False,
    ) -> IO[Any]:
        """
        Open a file and an advisory lock on it. The file is closed and the lock released upon exit
        of the outermost context. Returns the open file, which the caller shouldn't close (this is
        taken care of).

        :param filename: file to open & lock
        :param mode: open() mode, default: "r+b" if exclusive else "rb"
        :param exclusive: True to open an exclusive lock (default: shared lock)
        :param wait: True to wait as long as needed to obtain the lock, otherwise (default) raise
                     OSError if the lock isn't available immediately. Self-deadlock is possible;
                     see Python fcntl.flock docs for further details.
        :param update_atime: True to 'touch -a' the file after obtaining the lock
        """
        assert self._entries, "FlockHolder.flock() used out of context"
        while True:
            realfilename = os.path.realpath(filename)
            with self._lock:  # only needed to synchronize self._flocks
                if realfilename in self._flocks and not exclusive:
                    self._logger.debug(
                        StructuredLogMessage(
                            "reuse prior flock",
                            filename=filename,
                            realpath=realfilename,
                            exclusive=self._flocks[realfilename][1],
                        )
                    )
                    return self._flocks[realfilename][0]
                openfile = open(realfilename, mode or ("r+b" if exclusive else "rb"))
                try:
                    op = fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH
                    if not wait:
                        op |= fcntl.LOCK_NB
                    self._logger.debug(
                        StructuredLogMessage(
                            "flock",
                            file=filename,
                            realpath=realfilename,
                            exclusive=exclusive,
                            wait=wait,
                        )
                    )
                    fcntl.flock(openfile, op)
                    # the flock will release whenever we ultimately openfile.close()

                    file_st = os.stat(openfile.fileno())
                    if update_atime:
                        os.utime(
                            openfile.fileno(), ns=(int(time.time() * 1e9), file_st.st_mtime_ns)
                        )

                    # Even if all concurrent processes obey the advisory flocks, the filename link
                    # could have been replaced or removed in the duration between our open() and
                    # fcntl() syscalls.
                    # - if it was removed, the following os.stat will trigger FileNotFoundError,
                    #   which is reasonable to propagate.
                    # - if it was replaced, the subsequent condition won't hold, and we'll loop
                    #   around to try again on the replacement file.
                    filename_st = os.stat(realfilename)
                    self._logger.debug(
                        StructuredLogMessage(
                            "flocked",
                            file=filename,
                            realpath=realfilename,
                            exclusive=exclusive,
                            name_inode=filename_st.st_ino,
                            fd_inode=file_st.st_ino,
                        )
                    )
                    if (
                        filename_st.st_dev == file_st.st_dev
                        and filename_st.st_ino == file_st.st_ino
                    ):
                        assert realfilename not in self._flocks
                        self._flocks[realfilename] = (openfile, exclusive)
                        return openfile
                except:
                    openfile.close()
                    raise
                openfile.close()
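Stripped of the logging and reuse bookkeeping, the flock() method above reduces to an open/flock/stat retry loop: lock the descriptor, then keep it only if the path still names the inode we locked. A self-contained sketch of that pattern (the helper name flock_path is invented, not part of the class above):

import fcntl
import os

def flock_path(path, exclusive=True):
    """Lock `path`, retrying if the directory entry is swapped underneath us."""
    while True:
        f = open(path, "r+b" if exclusive else "rb")
        try:
            fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)
            st_fd = os.stat(f.fileno())
            st_name = os.stat(path)  # FileNotFoundError propagates, as above
            if (st_name.st_dev, st_name.st_ino) == (st_fd.st_dev, st_fd.st_ino):
                return f  # caller closes the file to release the lock
        except:
            f.close()
            raise
        f.close()  # the name was replaced; loop and lock the new file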
Example #39
0
 def __enter__(self):
     fcntl.flock(self.fd, self.op)
     return self
Example #40
0
 def _release_all_locks(self):
     filedescriptor = self._filedesc
     if filedescriptor is not None:
         fcntl.flock(filedescriptor, fcntl.LOCK_UN)
         os.close(filedescriptor)
         self._filedesc = None
Example #41
0
 def release(self):
     if self.fd is not None:
         fd, self.fd = self.fd, None
         if fcntl is not None:
             fcntl.flock(fd, fcntl.LOCK_UN)
         os.close(fd)
Example #42
0
 def __exit__(self, exc_type, exc_value, traceback):
     fcntl.flock(self.fd, fcntl.LOCK_UN)
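Examples #39 and #42 are the __enter__/__exit__ halves of one context manager; adding a constructor gives a reusable guard. A sketch under that assumption (the class name Flock is invented):

import fcntl

class Flock:
    def __init__(self, fd, op=fcntl.LOCK_EX):
        self.fd = fd  # an open file object or raw file descriptor
        self.op = op

    def __enter__(self):
        fcntl.flock(self.fd, self.op)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        fcntl.flock(self.fd, fcntl.LOCK_UN)

Usage: `with open("data", "r+b") as f, Flock(f): ...` scopes the lock to the with block.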
Example #43
0
 def __release(self):
     self.timer.cancel()
     self.timer = None
     fcntl.flock(self.fd, fcntl.LOCK_UN)
     os.close(self.fd)
Example #44
0
 def acquire(self):
     self._fd = os.open(self._path, os.O_CREAT if self._create else 0)
     fcntl.flock(self._fd, fcntl.LOCK_EX)
Example #45
0
def handler(signum, frame):
    # SIGALRM handler referenced below; it only needs to exist so that the
    # alarm interrupts the blocking lock calls with EINTR.
    pass

def main():
    f1 = open("testfile", 'w')
    f2 = open("testfile", 'w')

    fcntl.flock(f1, fcntl.LOCK_SH | fcntl.LOCK_NB)
    """
    is flock interruptible?
    """
    signal.signal(signal.SIGALRM, handler)
    signal.alarm(5)
    try:
        fcntl.flock(f2, fcntl.LOCK_EX)
    except IOError as e:
        if e.errno != errno.EINTR:
            raise
    else:
        raise RuntimeError("expect flock to block")

    fcntl.flock(f1, fcntl.LOCK_UN)

    lockdata = struct.pack('hhllhh', fcntl.F_WRLCK, 0, 0, 10, 0, 0)
    try:
        fcntl.fcntl(f1, fcntl.F_OFD_SETLK, lockdata)
    except IOError as e:
        if e.errno != errno.EINVAL:
            raise
        else:
            print('kernel does not support fcntl.F_OFD_SETLK')
            return

    lockdata = struct.pack('hhllhh', fcntl.F_WRLCK, 0, 10, 10, 0, 0)
    fcntl.fcntl(f2, fcntl.F_OFD_SETLK, lockdata)
    """
    is posix lock interruptible?
    """
    signal.signal(signal.SIGALRM, handler)
    signal.alarm(5)
    try:
        lockdata = struct.pack('hhllhh', fcntl.F_WRLCK, 0, 0, 0, 0, 0)
        fcntl.fcntl(f2, fcntl.F_OFD_SETLKW, lockdata)
    except IOError as e:
        if e.errno != errno.EINTR:
            raise
    else:
        raise RuntimeError("expect posix lock to block")
    """
    file handler 2 should still hold lock on 10~10
    """
    try:
        lockdata = struct.pack('hhllhh', fcntl.F_WRLCK, 0, 10, 10, 0, 0)
        fcntl.fcntl(f1, fcntl.F_OFD_SETLK, lockdata)
    except IOError as e:
        if e.errno != errno.EAGAIN:
            raise
    else:
        raise RuntimeError("expect file handler 2 to hold lock on 10~10")

    lockdata = struct.pack('hhllhh', fcntl.F_UNLCK, 0, 0, 0, 0, 0)
    fcntl.fcntl(f1, fcntl.F_OFD_SETLK, lockdata)
    fcntl.fcntl(f2, fcntl.F_OFD_SETLK, lockdata)

    print('ok')
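The struct.pack('hhllhh', ...) calls above hand-assemble a struct flock (l_type, l_whence, l_start, l_len, plus pid/padding); that format string matches the test's Linux target and is not portable. A hedged wrapper around the same pattern, usable where the platform exposes fcntl.F_OFD_SETLK:

import fcntl
import struct

def ofd_lock(fd, lock_type, start=0, length=0, wait=False):
    # lock_type is fcntl.F_RDLCK, fcntl.F_WRLCK or fcntl.F_UNLCK;
    # length == 0 means "to end of file". Struct layout assumed from
    # the test above.
    lockdata = struct.pack('hhllhh', lock_type, 0, start, length, 0, 0)
    cmd = fcntl.F_OFD_SETLKW if wait else fcntl.F_OFD_SETLK
    fcntl.fcntl(fd, cmd, lockdata)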
Example #46
0
 def acquire(self):
     flags = os.O_CREAT | os.O_TRUNC | os.O_RDWR
     self.fd = os.open(self.filename, flags)
     if fcntl is not None:
         fcntl.flock(self.fd, fcntl.LOCK_EX)
Example #47
0
import json 
import os 
import fcntl 
import time 
PATH = os.path.dirname(os.path.abspath(__file__))

FILE_USER_API = os.path.join('/Users/zhangkailin/zklcode/Midas_Engine','usersplit2.json')

if os.path.exists(FILE_USER_API):
    
    with open(FILE_USER_API,'r+') as jf:
        fcntl.flock(jf.fileno(),fcntl.LOCK_EX)
        
        #jf.seek(0)
        aa = json.load(jf)
        user_api_a = aa["df"]
        user_api_b = aa["df1"]
        user_api_c = aa["df2"]
        time.sleep(5)
        i = 1
        while i < 10000:
            user_api_a[str(i)] = i
            user_api_b[str(i)] = i
            user_api_c[str(i)] = i

            print("user2 : {}".format(i))
            i = i + 1

        jf.seek(0)
        json.dump(aa, jf)  # write the modified data back before the file closes
        jf.truncate()
Example #48
0
def main():
    global best_top1, best_top5
    
    args.world_size = 1
    
    start_epoch = args.start_epoch  # start from epoch 0 or last checkpoint epoch

    if not os.path.isdir(args.checkpoint):
        mkdir_p(args.checkpoint)

    # Data loading code    
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    crop_size = 224
    val_size = 256

    pipe = HybridTrainPipe(batch_size=args.train_batch, num_threads=args.workers, device_id=args.local_rank, data_dir=traindir, crop=crop_size, dali_cpu=args.dali_cpu)
    pipe.build()
    train_loader = DALIClassificationIterator(pipe, size=int(pipe.epoch_size("Reader") / args.world_size))

    pipe = HybridValPipe(batch_size=args.test_batch, num_threads=args.workers, device_id=args.local_rank, data_dir=valdir, crop=crop_size, size=val_size)
    pipe.build()
    val_loader = DALIClassificationIterator(pipe, size=int(pipe.epoch_size("Reader") / args.world_size))

    # create model
    if args.pretrained:
        print("=> using pre-trained model '{}'".format(args.arch))
        model = models.__dict__[args.arch](pretrained=True)
    elif args.arch.startswith('resnext'):
        model = models.__dict__[args.arch](
                    baseWidth=args.base_width,
                    cardinality=args.cardinality,
                )
    else:
        print("=> creating model '{}'".format(args.arch))
        model = models.__dict__[args.arch]()

    if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
        model.features = torch.nn.DataParallel(model.features)
        model.cuda()
    else:
        model = torch.nn.DataParallel(model).cuda()

    cudnn.benchmark = True

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()
    if args.optimizer.lower() == 'sgd':
        optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    elif args.optimizer.lower() == 'adamw':
        optimizer = AdamW(model.parameters(), lr=args.lr, betas=(args.beta1, args.beta2), weight_decay=args.weight_decay, warmup = 0)
    elif args.optimizer.lower() == 'radam':
        optimizer = RAdam(model.parameters(), lr=args.lr, betas=(args.beta1, args.beta2), weight_decay=args.weight_decay)
    elif args.optimizer.lower() == 'lsadam': 
        optimizer = LSAdamW(model.parameters(), lr=args.lr*((1.+4.*args.sigma)**(0.25)), 
                           betas=(args.beta1, args.beta2),
                           weight_decay=args.weight_decay, 
                           sigma=args.sigma)
    elif args.optimizer.lower() == 'lsradam':
        sigma = 0.1
        optimizer = LSRAdam(model.parameters(), lr=args.lr*((1.+4.*args.sigma)**(0.25)), 
                           betas=(args.beta1, args.beta2),
                           weight_decay=args.weight_decay, 
                           sigma=args.sigma)
    elif args.optimizer.lower() == 'srsgd':
        iter_count = 1
        optimizer = SGD_Adaptive(model.parameters(), lr=args.lr, weight_decay=args.weight_decay, iter_count=iter_count, restarting_iter=args.restart_schedule[0])
    elif args.optimizer.lower() == 'sradam':
        iter_count = 1
        optimizer = SRNAdam(model.parameters(), lr=args.lr, betas=(args.beta1, args.beta2), iter_count=iter_count, weight_decay=args.weight_decay, restarting_iter=args.restart_schedule[0]) 
    elif args.optimizer.lower() == 'sradamw':
        iter_count = 1
        optimizer = SRAdamW(model.parameters(), lr=args.lr, betas=(args.beta1, args.beta2), iter_count=iter_count, weight_decay=args.weight_decay, warmup = 0, restarting_iter=args.restart_schedule[0]) 
    elif args.optimizer.lower() == 'srradam':
        #NOTE: need to double-check this
        iter_count = 1
        optimizer = SRRAdam(model.parameters(), lr=args.lr, betas=(args.beta1, args.beta2), iter_count=iter_count, weight_decay=args.weight_decay, warmup = 0, restarting_iter=args.restart_schedule[0]) 
        
    # Resume
    title = 'ImageNet-' + args.arch
    if args.resume:
        # Load checkpoint.
        print('==> Resuming from checkpoint..')
        assert os.path.isfile(args.resume), 'Error: no checkpoint directory found!'
        args.checkpoint = os.path.dirname(args.resume)
        checkpoint = torch.load(args.resume)
        best_top1 = checkpoint['best_top1']
        best_top5 = checkpoint['best_top5']
        start_epoch = checkpoint['epoch'] - 1
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        iter_count = optimizer.param_groups[0]['iter_count']
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title, resume=True)
    else:
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
        logger.set_names(['Learning Rate', 'Train Loss', 'Valid Loss', 'Train Top1', 'Valid Top1', 'Train Top5', 'Valid Top5'])
        
    logger.file.write('    Total params: %.2fM' % (sum(p.numel() for p in model.parameters())/1000000.0))


    if args.evaluate:
        logger.file.write('\nEvaluation only')
        test_loss, test_top1, test_top5 = test(val_loader, model, criterion, start_epoch, use_cuda, logger)
        logger.file.write(' Test Loss:  %.8f, Test Top1:  %.2f, Test Top5: %.2f' % (test_loss, test_top1, test_top5))
        return

    # Train and val
    schedule_index = 1
    for epoch in range(start_epoch, args.epochs):
        if args.optimizer.lower() == 'srsgd':
            if epoch in args.schedule:
                optimizer = SGD_Adaptive(model.parameters(), lr=args.lr * (args.gamma**schedule_index), weight_decay=args.weight_decay, iter_count=iter_count, restarting_iter=args.restart_schedule[schedule_index])
                schedule_index += 1
                
        elif args.optimizer.lower() == 'sradam':
            if epoch in args.schedule:
                optimizer = SRNAdam(model.parameters(), lr=args.lr * (args.gamma**schedule_index), betas=(args.beta1, args.beta2), iter_count=iter_count, weight_decay=args.weight_decay, restarting_iter=args.restart_schedule[schedule_index]) 
                schedule_index += 1
                
        elif args.optimizer.lower() == 'sradamw':
            if epoch in args.schedule:
                optimizer = SRAdamW(model.parameters(), lr=args.lr * (args.gamma**schedule_index), betas=(args.beta1, args.beta2), iter_count=iter_count, weight_decay=args.weight_decay, warmup = 0, restarting_iter=args.restart_schedule[schedule_index])
                schedule_index += 1
                
        elif args.optimizer.lower() == 'srradam':
            if epoch in args.schedule:
                optimizer = SRRAdam(model.parameters(), lr=args.lr * (args.gamma**schedule_index), betas=(args.beta1, args.beta2), iter_count=iter_count, weight_decay=args.weight_decay, warmup = 0, restarting_iter=args.restart_schedule[schedule_index])
                schedule_index += 1
            
        else:
            adjust_learning_rate(optimizer, epoch)

        logger.file.write('\nEpoch: [%d | %d] LR: %f' % (epoch + 1, args.epochs, state['lr']))
        
        if args.optimizer.lower() == 'srsgd' or args.optimizer.lower() == 'sradam' or args.optimizer.lower() == 'sradamw' or args.optimizer.lower() == 'srradam':
            train_loss, train_top1, train_top5, iter_count = train(train_loader, model, criterion, optimizer, epoch, use_cuda, logger)
        else:
            train_loss, train_top1, train_top5 = train(train_loader, model, criterion, optimizer, epoch, use_cuda, logger)

        test_loss, test_top1, test_top5 = test(val_loader, model, criterion, epoch, use_cuda, logger)

        # append logger file
        logger.append([state['lr'], train_loss, test_loss, train_top1, test_top1, train_top5, test_top5])

        writer.add_scalars('train_loss', {args.model_name: train_loss}, epoch)
        writer.add_scalars('test_loss', {args.model_name: test_loss}, epoch)
        writer.add_scalars('train_top1', {args.model_name: train_top1}, epoch)
        writer.add_scalars('test_top1', {args.model_name: test_top1}, epoch)
        writer.add_scalars('train_top5', {args.model_name: train_top5}, epoch)
        writer.add_scalars('test_top5', {args.model_name: test_top5}, epoch)

        # save model
        is_best = test_top1 > best_top1
        best_top1 = max(test_top1, best_top1)
        best_top5 = max(test_top5, best_top5)
        save_checkpoint({
                'epoch': epoch + 1,
                'schedule_index': schedule_index,
                'state_dict': model.state_dict(),
                'top1': test_top1,
                'top5': test_top5,
                'best_top1': best_top1,
                'best_top5': best_top5,
                'optimizer' : optimizer.state_dict(),
            }, is_best, epoch, checkpoint=args.checkpoint)
        
        # reset DALI iterators
        train_loader.reset()
        val_loader.reset()
        
    logger.file.write('Best top1: %f'%best_top1)
    logger.file.write('Best top5: %f'%best_top5)
    
    logger.close()
    logger.plot()
    savefig(os.path.join(args.checkpoint, 'log.eps'))

    print('Best top1: %f'%best_top1)
    print('Best top5: %f'%best_top5)
    
    with open("./all_results_imagenet.txt", "a") as f:
        fcntl.flock(f, fcntl.LOCK_EX)
        f.write("%s\n"%args.checkpoint)
        f.write("best_top1 %f, best_top5 %f\n\n"%(best_top1,best_top5))
        fcntl.flock(f, fcntl.LOCK_UN)
Example #49
0
def trylock(fd):
    try:
        fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        return True
    except Exception:  # pylint: disable=broad-except
        return False
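A non-blocking probe like trylock is typically used for skip-if-busy scheduling: attempt the lock, and if another process holds it, move on instead of waiting. A small sketch of that usage (names invented):

import fcntl
import os

def run_exclusive(path, job):
    """Run job() only if no other process holds the lock file."""
    fd = os.open(path, os.O_CREAT | os.O_RDWR)
    try:
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            return False  # another worker owns it; skip
        job()
        return True
    finally:
        os.close(fd)  # closing the descriptor releases the flock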
Example #50
0
def main():
    params = Params()

    if params.get("DisableUpdates") == b"1":
        raise RuntimeError("updates are disabled by the DisableUpdates param")

    if ANDROID and os.geteuid() != 0:
        raise RuntimeError("updated must be launched as root!")

    # Set low io priority
    proc = psutil.Process()
    if psutil.LINUX:
        proc.ionice(psutil.IOPRIO_CLASS_BE, value=7)

    ov_lock_fd = open(LOCK_FILE, 'w')
    try:
        fcntl.flock(ov_lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError as e:
        raise RuntimeError(
            "couldn't get overlay lock; is another updated running?") from e

    # Wait for IsOffroad to be set before our first update attempt
    wait_helper = WaitTimeHelper(proc)
    wait_helper.sleep(30)

    first_run = True
    last_fetch_time = 0
    update_failed_count = 0

    # Run the update loop
    #  * every 1m, do a lightweight internet/update check
    #  * every 10m, do a full git fetch
    while not wait_helper.shutdown:
        update_now = wait_helper.ready_event.is_set()
        wait_helper.ready_event.clear()

        # Don't run updater while onroad or if the time's wrong
        time_wrong = datetime.datetime.utcnow().year < 2019
        is_onroad = params.get("IsOffroad") != b"1"
        if is_onroad or time_wrong:
            wait_helper.sleep(30)
            cloudlog.info("not running updater, not offroad")
            continue

        # Attempt an update
        exception = None
        new_version = False
        update_failed_count += 1
        try:
            init_overlay()

            internet_ok, update_available = check_for_update()
            if internet_ok and not update_available:
                update_failed_count = 0

            # Fetch updates at most every 10 minutes
            if internet_ok and (update_now or
                                time.monotonic() - last_fetch_time > 60 * 10):
                new_version = fetch_update(wait_helper)
                update_failed_count = 0
                last_fetch_time = time.monotonic()

                if first_run and not new_version and os.path.isdir(
                        NEOSUPDATE_DIR):
                    shutil.rmtree(NEOSUPDATE_DIR)
                first_run = False
        except subprocess.CalledProcessError as e:
            cloudlog.event("update process failed",
                           cmd=e.cmd,
                           output=e.output,
                           returncode=e.returncode)
            exception = f"command failed: {e.cmd}\n{e.output}"
        except Exception as e:
            cloudlog.exception("uncaught updated exception, shouldn't happen")
            exception = str(e)

        set_params(new_version, update_failed_count, exception)
        wait_helper.sleep(60)

    dismount_overlay()
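The loop above enforces its fetch cadence with time.monotonic() rather than wall-clock time, so clock jumps cannot trigger or suppress fetches. The throttle in isolation (a sketch; names invented):

import time

FETCH_INTERVAL = 60 * 10  # at most one full fetch per 10 minutes
_last_fetch = 0.0

def fetch_due(force=False):
    global _last_fetch
    now = time.monotonic()
    if force or now - _last_fetch > FETCH_INTERVAL:
        _last_fetch = now
        return True
    return False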
Example #51
0
def lock(fd):
    fcntl.flock(fd, fcntl.LOCK_EX)
Example #52
0
 def _acquire_flock(self):
     self.fd = os.open(self.lockpath, os.O_RDWR)
     fcntl.flock(self.fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
     return True
Example #53
0
 def block_file(path, lock):
     with open(path, 'r+') as f:
         fcntl.flock(f, fcntl.LOCK_EX)
         lock.acquire()
         fcntl.flock(f, fcntl.LOCK_UN)
Example #54
0
def unlock(fd):
    fcntl.flock(fd, fcntl.LOCK_UN)
Example #55
0
def daemonize_process(pid_file: str,
                      logger: logging.Logger,
                      chdir: str = "/") -> None:
    """daemonize the current process

    This calls fork(), and has the main process exit. When it returns we will be
    running in the child process.
    """

    # If the pidfile already exists, read the old pid out of it first: the
    # locking attempt below truncates the file, so we need a copy of the old
    # contents to restore in case we fail to take the lock.
    old_pid = ""
    if os.path.isfile(pid_file):
        with open(pid_file) as pid_fh:
            old_pid = pid_fh.read()

    # Create a lockfile so that only one instance of this daemon is running at any time.
    try:
        lock_fh = open(pid_file, "w")
    except OSError:
        print("Unable to create the pidfile.")
        sys.exit(1)

    try:
        # Try to get an exclusive lock on the file. This will fail if another process
        # has the file locked.
        fcntl.flock(lock_fh, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except OSError:
        print("Unable to lock on the pidfile.")
        # We need to overwrite the pidfile if we got here.
        #
        # XXX better to avoid overwriting it, surely. this looks racey as the pid file
        # could be created between us trying to read it and us trying to lock it.
        with open(pid_file, "w") as pid_fh:
            pid_fh.write(old_pid)
        sys.exit(1)

    # Fork, creating a new process for the child.
    process_id = os.fork()

    if process_id != 0:
        # parent process: exit.

        # we use os._exit to avoid running the atexit handlers. In particular, that
        # means we don't flush the logs. This is important because if we are using
        # a MemoryHandler, we could have logs buffered which are now buffered in both
        # the main and the child process, so if we let the main process flush the logs,
        # we'll get two copies.
        os._exit(0)

    # This is the child process. Continue.

    # Stop listening for signals that the parent process receives.
    # This is done by getting a new process id.
    # setpgrp() is an alternative to setsid().
    # setsid puts the process in a new parent group and detaches its controlling
    # terminal.

    os.setsid()

    # point stdin, stdout, stderr at /dev/null
    devnull = "/dev/null"
    if hasattr(os, "devnull"):
        # Python has set os.devnull on this system, use it instead as it might be
        # different than /dev/null.
        devnull = os.devnull

    devnull_fd = os.open(devnull, os.O_RDWR)
    os.dup2(devnull_fd, 0)
    os.dup2(devnull_fd, 1)
    os.dup2(devnull_fd, 2)
    os.close(devnull_fd)

    # now that we have redirected stderr to /dev/null, any uncaught exceptions will
    # get sent to /dev/null, so make sure we log them.
    #
    # (we don't normally expect reactor.run to raise any exceptions, but this will
    # also catch any other uncaught exceptions before we get that far.)

    def excepthook(type_, value, traceback):
        logger.critical("Unhanded exception",
                        exc_info=(type_, value, traceback))

    sys.excepthook = excepthook

    # Set umask to default to safe file permissions when running as a root daemon. 027
    # is an octal number which we are typing as 0o27 for Python3 compatibility.
    os.umask(0o27)

    # Change to a known directory. If this isn't done, starting a daemon in a
    # subdirectory that needs to be deleted results in "directory busy" errors.
    os.chdir(chdir)

    try:
        lock_fh.write("%s" % (os.getpid()))
        lock_fh.flush()
    except OSError:
        logger.error("Unable to write pid to the pidfile.")
        print("Unable to write pid to the pidfile.")
        sys.exit(1)

    # write a log line on SIGTERM.
    def sigterm(signum, frame):
        logger.warning("Caught signal %s. Stopping daemon." % signum)
        sys.exit(0)

    signal.signal(signal.SIGTERM, sigterm)

    # Cleanup pid file at exit.
    def exit():
        logger.warning("Stopping daemon.")
        os.remove(pid_file)
        sys.exit(0)

    atexit.register(exit)

    logger.warning("Starting daemon.")
Example #56
0
    n_datapoints = len(diff_rad)

    # Permute d_stim.
    for i_perm in range(n_permutations):
        ind = np.random.choice(np.arange(n_datapoints), n_datapoints)
        params[i_perm, :] = ba.fit_dog(resid_error_rad[ind], diff_rad[ind])

    # Write to the file.
    flags = os.O_CREAT | os.O_WRONLY
    results_dir = os.path.join(package_dir, 'results', task_name)
    try:
        os.makedirs(results_dir)
    except OSError:
        pass
    if isinstance(sub_num, int):
        sub_name = '%03d' % sub_num
    else:
        sub_name = '_'.join(['%03d' % (n, ) for n in sub_num])
    if task_name != '':
        f_name = os.path.join(
            results_dir, 'bootstrap_dog_d%02d_s%s_%s.txt' %
            (only_delay, sub_name, task_name))
    else:
        f_name = os.path.join(
            results_dir,
            'bootstrap_dog_d%02d_s%s.txt' % (only_delay, sub_name))
    file_handle = os.open(f_name, flags)
    fcntl.flock(file_handle, fcntl.LOCK_EX)
    with os.fdopen(file_handle, 'a') as f:
        np.savetxt(f, params)
Example #57
0
def readSelection(inputSamPath,
                  outputDir,
                  umiLen,
                  cellBarcodeLen,
                  sampleIndLen,
                  maxReadsOffset,
                  selectOne=False,
                  compress=True,
                  paired=False,
                  strict=True,
                  maxPairOffset=100000000,
                  checker=False):
    '''
    use alignment results to select valid reads from reads with the same UMI

    valid reads must be successfully mapped to the reference genome,
    be mapped to the most popular chromosome among reads that share the
    same UMI, and have mapped locations on that chromosome within
    maxReadsOffset of the median mapped location
    '''
    path0 = os.getcwd()
    colNames = [
        'qname', 'flag', 'rname', 'pos', 'mapq', 'CIGAR', 'rnext', 'pnext',
        'templ_len', 'seq', 'qual', 'AS', 'XN', 'XM', 'XO', 'XG', 'NM', 'MD',
        'YS', 'YT'
    ]
    rml = [
        'mapq', 'CIGAR', 'rnext', 'pnext', 'templ_len', 'AS', 'XN', 'XM', 'XO',
        'XG', 'NM', 'MD', 'YS', 'YT'
    ]
    sam = pd.read_csv(inputSamPath,
                      sep='\t',
                      header=None,
                      names=colNames,
                      index_col=False,
                      lineterminator='\n')
    for x in rml:
        del sam[f'{x}']
    headers = sam.qname
    samFlags = sam.flag
    umi = [line[:umiLen] for line in headers]
    headers = [('@' + x[umiLen:]) for x in headers]
    pos = sam.pos
    rname = sam.rname
    seqs = sam.seq
    quals = sam.qual
    del sam
    seqLens = [len(x) for x in seqs]
    saml = list(zip(umi, headers, rname, seqLens, pos, seqs, quals, samFlags))
    nReads0 = len(saml)
    #remove reads that failed to align
    saml = [x for x in saml if x[2] != '*']
    saml.sort()
    #reverse seqs that were reverse-complemented during alignment
    saml = [(x[0], x[1], x[2], x[3], x[4], revComp(x[5], 'c'),
             revComp(x[6])) if x[7] & 16 else
            (x[0], x[1], x[2], x[3], x[4], x[5], x[6]) for x in saml]
    rname = list(zip(*saml))[2]
    umi = list(zip(*saml))[0]
    umiN = Counter(umi)
    lcell = len(saml)
    #determine which reads to select
    selectl = np.zeros(lcell, dtype=bool)
    i = 0
    while i < lcell:
        if paired:
            nreads = umiN[saml[i][0]]
            chrs = rname[i:(i + nreads)]
            chrscounter = Counter(chrs)
            chrsmode = chrscounter.most_common()[0][0]
            j = 0
            poslistOdd = []
            TposlistOdd = []
            poslistEven = []
            TposlistEven = []
            while j < nreads:
                if (j % 2) == 1:
                    TposlistOdd.append(saml[i + j][4])
                    if chrs[j] == chrsmode:
                        poslistOdd.append(saml[i + j][4])
                    j += 1
                else:
                    TposlistEven.append(saml[i + j][4])
                    if chrs[j] == chrsmode:
                        poslistEven.append(saml[i + j][4])
                    j += 1
            npairs = len(poslistOdd)
            medposOdd = median(poslistOdd)
            medposEven = median(poslistEven)
            if selectOne:
                k = 0
                prevreadOffset = 1000000
                while k < npairs:
                    totalOffset = abs(TposlistOdd[k] -
                                      medposOdd) + abs(TposlistEven[k] -
                                                       medposEven)
                    if (rname[i + (2 * k)]
                            == chrsmode) and (totalOffset < prevreadOffset):
                        medPairInd = k
                        prevreadOffset = totalOffset
                    k += 1
                selectl[i + (2 * medPairInd)] = True
                selectl[i + (2 * medPairInd) + 1] = True
            else:
                k = 0
                while k < npairs:
                    oddOffset = abs(TposlistOdd[k] - medposOdd)
                    evenOffset = abs(TposlistEven[k] - medposEven)
                    pairOffset = abs(TposlistOdd[k] - poslistEven[k])
                    #strict qc: both reads in a pair must align to within cutoff
                    if strict:
                        if (rname[i + (2 * k)] == chrsmode) and (
                                oddOffset < maxReadsOffset) and (
                                    evenOffset < maxReadsOffset) and (
                                        pairOffset < maxPairOffset):
                            selectl[i + (2 * k)] = True
                            selectl[i + (2 * k) + 1] = True
                    #lenient qc: only one read in a pair must align to within cutoff
                    else:
                        if (rname[i + (2 * k)] == chrsmode) and (
                            (oddOffset < maxReadsOffset) or
                            (evenOffset < maxReadsOffset)):
                            if pairOffset < maxPairOffset:
                                selectl[i + (2 * k)] = True
                                selectl[i + (2 * k) + 1] = True
                    k += 1
        else:
            nreads = umiN[saml[i][0]]
            chrs = rname[i:(i + nreads)]
            chrscounter = Counter(chrs)
            chrsmode = chrscounter.most_common()[0][0]
            j = 0
            poslist = []
            while j < nreads:
                if chrs[j] == chrsmode:
                    poslist.append(saml[i + j][4])
                j += 1
            medpos = median(poslist)
            if selectOne:
                k = 0
                prevreadOffset = 1000000
                while k < nreads:
                    offset = abs(saml[i + k][4] - medpos)
                    if (rname[i + k]
                            == chrsmode) and (offset < prevreadOffset):
                        medInd = k
                        prevreadOffset = offset
                    k += 1
                selectl[i + medInd] = True
            else:
                k = 0
                while k < nreads:
                    if (rname[i + k] == chrsmode) and (
                            abs(saml[i + k][4] - medpos) < maxReadsOffset):
                        selectl[i + k] = True
                    k += 1
        i += nreads

    nReadsSelected = 0
    if paired:
        writeBufR1 = []
        writeBufR2 = []
        i = 0
        while i < lcell:
            if selectl[i]:
                if i % 2 == 0:
                    currentRead = saml[i][1] + '\n' + saml[i][
                        5] + '\n' + '+\n' + saml[i][6] + '\n'
                    writeBufR1.append(currentRead)
                    nReadsSelected += 1
                else:
                    currentRead = saml[i][1] + '\n' + saml[i][
                        5] + '\n' + '+\n' + saml[i][6] + '\n'
                    writeBufR2.append(currentRead)
                    nReadsSelected += 1
            i += 1
        writeBufR1 = ''.join(writeBufR1)
        writeBufR2 = ''.join(writeBufR2)
    else:
        writeBuf = []
        i = 0
        for i in range(lcell):
            if selectl[i]:
                currentRead = saml[i][1] + '\n' + saml[i][
                    5] + '\n' + '+\n' + saml[i][6] + '\n'
                writeBuf.append(currentRead)
                nReadsSelected += 1
        writeBuf = ''.join(writeBuf)

    os.chdir(outputDir)

    #testing
    if checker:
        pick = list(zip(selectl, saml))
        with open(f'{inputSamPath}_selection', 'wb') as f:
            pickle.dump(pick, f)

    cellName = inputSamPath[-(cellBarcodeLen + 4):-4]
    if compress:
        if paired:
            bwriteBufR1 = writeBufR1.encode()
            with gzip.open(f'cell_{cellName}_R1.fastq.gz', 'ab') as f:
                f.write(bwriteBufR1)
            bwriteBufR2 = writeBufR2.encode()
            with gzip.open(f'cell_{cellName}_R2.fastq.gz', 'ab') as f:
                f.write(bwriteBufR2)
        else:
            bwriteBuf = writeBuf.encode()
            with gzip.open(f'cell_{cellName}.fastq.gz', 'ab') as f:
                f.write(bwriteBuf)
    else:
        if paired:
            with open(f'cell_{cellName}_R1.fastq', 'a') as f:
                f.write(writeBufR1)
            with open(f'cell_{cellName}_R2.fastq', 'a') as f:
                f.write(writeBufR2)
        else:
            with open(f'cell_{cellName}.fastq', 'a') as f:
                f.write(writeBuf)
    os.chdir(path0)
    summary = [cellName, nReads0, lcell, nReadsSelected]
    with open('readSelection', 'ab') as f:
        fcntl.flock(f, fcntl.LOCK_EX)
        pickle.dump(summary, f)
        fcntl.flock(f, fcntl.LOCK_UN)
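The selection rule buried in readSelection -- keep reads on the modal chromosome that lie within maxReadsOffset of the median mapped position -- can be stated on its own. A self-contained sketch of just that rule (function name invented):

from collections import Counter
from statistics import median

def select_read_indices(chroms, positions, max_offset):
    mode_chrom = Counter(chroms).most_common(1)[0][0]
    med = median(p for c, p in zip(chroms, positions) if c == mode_chrom)
    return [i for i, (c, p) in enumerate(zip(chroms, positions))
            if c == mode_chrom and abs(p - med) < max_offset]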
Example #58
0
def endDowntime(fname, end_time=None):

        if end_time is None:
            end_time = long(time.time())
    
        try:
            fd = open(fname, 'r+')
        except IOError:
            return 0 # no file -> nothing to end

        try:
            fcntl.flock( fd, fcntl.LOCK_EX | fcntl.LOCK_NB  )
            # read the old info
            inlines = fd.readlines()

            outlines=[]
            lnr=0
            closed_nr=0


            for long_line in inlines:
                lnr+=1
                line = long_line.strip()

                if len(line)==0:
                    outlines.append(long_line)
                    continue # pass on empty lines
                if line[0:1]=='#':
                    outlines.append(long_line)
                    continue # pass on comments

                arr = line.split()
                if len(arr)<2:
                    outlines.append(long_line)
                    continue # pass on malformed lines

                #make sure this is for the right entry
                #if ((entry!="All")and(len(arr)>2)and(entry!=arr[2])):
                #    outlines.append(long_line)
                #    continue
                #if ((entry=="All")and(len(arr)>2)and("factory"==arr[2])):
                #    outlines.append(long_line)
                #    continue
                #if ((frontend!="All")and(len(arr)>3)and(frontend!=arr[3])):
                #    outlines.append(long_line)
                #    continue
                #make sure that this time tuple applies to this security_class
                #if ((security_class!="All")and(len(arr)>4)and(security_class!=arr[4])):
                #    outlines.append(long_line)
                #    continue

                cur_start_time = 0
                cur_end_time = 0
                if arr[0] != 'None':
                    cur_start_time = timeConversion.extractISO8601_Local( arr[0] )
                if arr[1] != 'None':
                    cur_end_time   = timeConversion.extractISO8601_Local( arr[1] )
                # open period -> close
                if arr[1] == 'None' or (  (cur_start_time < long(time.time())) and (cur_end_time > end_time)  ): 
                    outlines.append("%-30s %-30s" % (arr[0], timeConversion.getISO8601_Local(end_time)) )
                    outlines.append("\n")
                    closed_nr += 1
                else:
                    outlines.append( long_line ) # closed just pass on

                #Keep parsing file, since there may be multiple downtimes
                #pass # end for

            # go back to start to rewrite
            fd.seek(0)
            fd.writelines(outlines)
            fd.truncate()
        finally:
            fd.close()

        return closed_nr
Example #59
0
def sortSam(inputDir,
            outputDir,
            chunkDir,
            sam_name,
            r1chunk,
            i1chunk,
            set_name,
            sampleIndLen,
            barLen,
            umiLen,
            ignoreSampleInd,
            paired=False):
    '''
    sort reads in SAM files by sample index and cell barcode and store them
    as separate SAM files
    '''
    path0 = os.getcwd()
    os.chdir(inputDir)
    fs = open(sam_name)
    fR1 = open(f'{chunkDir}/{r1chunk}')
    if not ignoreSampleInd:
        fI1 = open(f'{chunkDir}/{i1chunk}')
    try:
        print(f'sorting {sam_name}...')
        t = time()
        #load inputs
        os.chdir(inputDir)
        if ignoreSampleInd:
            with open(set_name, 'rb') as f1:  #bar_set
                bar_set = pickle.load(f1)
        else:
            with open(set_name, 'rb') as f1:  #path_set
                path_set = pickle.load(f1)
        os.chdir(outputDir)
        #seperate reads by barcodes and write in blocks
        #   sample line: 'D000684:779:H53GNBCXY:1:1110:3316:42789_sample_GGGCAAAT_cell_TTCTACAGTGACTCAT_umi_AGGCGGTTGG\t16\tchr5\t82276115\t1\...'
        writeDict = {}
        count = 0
        n = 0
        writeCount = 0
        notEOF = True
        while notEOF:
            line = fs.readline()
            if paired:
                line2 = fs.readline()
            if line:
                count += 1
                rdump = fR1.readline()
                r1seq = fR1.readline()
                rdump = fR1.readline()
                rdump = fR1.readline()
                if not ignoreSampleInd:
                    rdump = fI1.readline()
                    i1seq = fI1.readline()
                    rdump = fI1.readline()
                    rdump = fI1.readline()
                    sampleName = i1seq[:sampleIndLen]
                else:
                    sampleName = 'sampleID'
                cellName = r1seq[:barLen]
                umi = r1seq[barLen:(barLen + umiLen)]
                line = umi + line
                if paired:
                    line2 = umi + line2
                cellPath = f'sample_{sampleName}/cell_{cellName}'
                n += 1
                if ignoreSampleInd:
                    if cellName in bar_set:
                        if cellName in writeDict:
                            writeDict[cellName].append(line)
                            if paired:
                                writeDict[cellName].append(line2)
                                writeCount += 1
                        else:
                            writeDict[cellName] = [line]
                            if paired:
                                writeDict[cellName].append(line2)
                                writeCount += 1
                        writeCount += 1
                else:
                    if cellPath in path_set:
                        if cellPath in writeDict:
                            writeDict[cellPath].append(line)
                            if paired:
                                writeDict[cellPath].append(line2)
                                writeCount += 1
                        else:
                            writeDict[cellPath] = [line]
                            if paired:
                                writeDict[cellPath].append(line2)
                                writeCount += 1
                        writeCount += 1
                if n == 500000:
                    n = 0
                    for cell in writeDict:
                        w = ''
                        w = w.join(writeDict[cell])
                        with open(f'{cell}.sam', 'a') as f:
                            fcntl.flock(f, fcntl.LOCK_EX)
                            f.write(w)
                            fcntl.flock(f, fcntl.LOCK_UN)
                    writeDict = {}
            else:
                notEOF = False
                if writeDict:
                    for cell in writeDict:
                        w = ''
                        w = w.join(writeDict[cell])
                        with open(f'{cell}.sam', 'a') as f:
                            fcntl.flock(f, fcntl.LOCK_EX)
                            f.write(w)
                            fcntl.flock(f, fcntl.LOCK_UN)
    finally:
        fs.close()
        fR1.close()
        if not ignoreSampleInd:
            fI1.close()
        print(
            f'{count} reads total from {sam_name}, {writeCount} written, {time() - t} s'
        )
        os.chdir(path0)
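sortSam batches roughly 500,000 lines in memory, then appends each cell's chunk under an exclusive flock so several sorter processes can safely share the same per-cell output files. That flush step in isolation:

import fcntl

def flush_buffers(write_dict):
    for cell, lines in write_dict.items():
        with open(f'{cell}.sam', 'a') as f:
            fcntl.flock(f, fcntl.LOCK_EX)
            f.write(''.join(lines))
            fcntl.flock(f, fcntl.LOCK_UN)
    write_dict.clear()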
Example #60
0
 def write_cmd(self):
     fcntl.flock(self.file.fileno(), fcntl.LOCK_EX)
     self.file.seek(0, 0)
     self.file.write(struct.pack('dd', *self.currcmd))
     self.file.flush()
     fcntl.flock(self.file.fileno(), fcntl.LOCK_UN)
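write_cmd serializes two doubles into a fixed-size record under an exclusive lock; a plausible reader counterpart (assumed here, not shown in the source) takes a shared lock and unpacks the same layout:

import fcntl
import struct

def read_cmd(file):
    fcntl.flock(file.fileno(), fcntl.LOCK_SH)
    try:
        file.seek(0, 0)
        return struct.unpack('dd', file.read(struct.calcsize('dd')))
    finally:
        fcntl.flock(file.fileno(), fcntl.LOCK_UN)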