Code example #1
File: __init__.py Project: taishi8117/turbinia
    def run_wrapper(self, evidence):
        """Wrapper to manage TurbiniaTaskResults and exception handling.

    This wrapper should be called to invoke the run() methods so it can handle
    the management of TurbiniaTaskResults and the exception handling.  Otherwise
    details from exceptions in the worker cannot be propagated back to the
    Turbinia TaskManager.

    This method should handle (in no particular order):
      - Exceptions thrown from run()
      - Verifying valid TurbiniaTaskResult object is returned
          - Check for bad results (non TurbiniaTaskResults) returned from run()
          - Auto-close results that haven't been closed
          - Verifying that the results are serializable
      - Locking to make sure only one task is active at a time

    Args:
      evidence: Evidence object

    Returns:
      A TurbiniaTaskResult object
    """
        with filelock.FileLock(config.LOCK_FILE):
            log.info('Starting Task {0:s} {1:s}'.format(self.name, self.id))
            original_result_id = None
            try:
                self.result = self.setup(evidence)
                original_result_id = self.result.id

                if self.turbinia_version != turbinia.__version__:
                    msg = 'Worker V-{0:s} and server V-{1:s} version do not match'.format(
                        self.turbinia_version, turbinia.__version__)
                    log.error(msg)
                    self.result.log(msg)
                    self.result.set_error(msg)
                    self.result.status = msg
                    return self.result

                self._evidence_config = evidence.config
                self.result = self.run(evidence, self.result)
            # pylint: disable=broad-except
            except Exception as e:
                msg = '{0:s} Task failed with exception: [{1!s}]'.format(
                    self.name, e)
                log.error(msg)
                log.error(traceback.format_exc())
                if self.result:
                    self.result.log(msg)
                    self.result.log(traceback.format_exc())
                    if hasattr(e, 'message'):
                        self.result.set_error(e.message,
                                              traceback.format_exc())
                    else:
                        self.result.set_error(e.__class__,
                                              traceback.format_exc())
                    self.result.status = msg
                else:
                    log.error(
                        'No TurbiniaTaskResult object found after task execution.'
                    )

            self.result = self.validate_result(self.result)

            # Trying to close the result if possible so that we clean up what we can.
            # This has a higher likelihood of failing because something must have gone
            # wrong as the Task should have already closed this.
            if self.result and not self.result.closed:
                msg = 'Trying last ditch attempt to close result'
                log.warning(msg)
                self.result.log(msg)

                if self.result.status:
                    status = self.result.status
                else:
                    status = 'No previous status'
                msg = (
                    'Task Result was auto-closed from task executor on {0:s} likely '
                    'due to previous failures.  Previous status: [{1:s}]'.
                    format(self.result.worker_name, status))
                self.result.log(msg)
                try:
                    self.result.close(self, False, msg)
                # Using broad except here because lots can go wrong due to the reasons
                # listed above.
                # pylint: disable=broad-except
                except Exception as e:
                    log.error(
                        'TurbiniaTaskResult close failed: {0!s}'.format(e))
                    if not self.result.status:
                        self.result.status = msg
                # Check the result again after closing to make sure it's still good.
                self.result = self.validate_result(self.result)

        if original_result_id != self.result.id:
            log.debug(
                'Result object {0:s} is different from original {1!s} after task '
                'execution which indicates errors during execution'.format(
                    self.result.id, original_result_id))
        else:
            log.debug(
                'Returning original result object {0:s} after task execution'.
                format(self.result.id))
        # TODO(aarontp): Find a better way to ensure this gets unset.
        self.output_manager = None
        return self.result
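The docstring above boils down to one reusable pattern: take a host-wide file lock, call run(), turn any exception into an error recorded on the result, and auto-close the result before handing it back. The sketch below shows only that skeleton; the Result object with set_error()/close()/closed and the LOCK_FILE path are hypothetical stand-ins, not the Turbinia API.

import logging
import traceback

import filelock

LOCK_FILE = '/var/tmp/task.lock'  # hypothetical lock path


def run_under_lock(run, evidence, result):
    """Run a task under a file lock and always return a result object."""
    with filelock.FileLock(LOCK_FILE):
        try:
            result = run(evidence, result)
        except Exception as exc:  # keep failure details on the result
            logging.error('Task failed: %s', exc)
            result.set_error(str(exc), traceback.format_exc())
        if not result.closed:
            # Auto-close so downstream consumers never see a dangling result.
            result.close(success=False, status='auto-closed by wrapper')
    return result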
Code example #2
def main():
    parser = argparse.ArgumentParser(description='recreate directory '
                                                 'structure just '
                                                 'from files that match '
                                                 'certain criteria')

    parser.add_argument('sourceDir')
    parser.add_argument('destDir')
    parser.add_argument('-k', '--keyword', required=True, action='append',
                        help='EXIF keyword(s)')
    parser.add_argument('-s', '--suffix', action='append',
                        help='Suffix(es) of the files to work on')
    parser.add_argument('-S', '--stripcount', default=1,
                        help='Number of path components to strip from '
                             'sourceDir')
    parser.add_argument('--symlink', action='store_true',
                        help='create symlinks instead of copying files')
    parser.add_argument('-l', '--loglevel', action=LogLevelAction,
                        help='Set log level (e.g. \"ERROR\")',
                        default=logging.INFO)

    try:
        args = parser.parse_args()
    except ValueError as e:
        print("Argument parsing failed: {}".format(e), file=sys.stderr)
        sys.exit(1)

    logger = logging.getLogger(__package__)
    logger.setLevel(args.loglevel)
    handler = logging.StreamHandler()
    logger.addHandler(handler)

    if not args.keyword or len(args.keyword) == 0:
        logger.error("No keywords specified")
        sys.exit(1)

    check_dir(args.sourceDir)
    check_dir(args.destDir)

    docopy = True
    if args.symlink:
        docopy = False

    lock = filelock.FileLock(os.path.join(tempfile.gettempdir(),
                                          "{}.lock".
                                          format(os.path.
                                                 basename(sys.argv[0]))))
    try:
        with lock.acquire(timeout=0):
            # To prevent iptcinfo3 from emitting warnings on files without
            # IPTC data.
            iptc_logger = logging.getLogger('iptcinfo')
            iptc_logger.setLevel(logging.ERROR)

            backup_dir(args.sourceDir, args.destDir, docopy,
                       args.keyword, args.stripcount, args.suffix)
    except filelock.Timeout:
        logger.warning("Already running, exiting.")
        sys.exit(1)

    logging.shutdown()
Code example #3
def main():
    args = make_args()
    config = configparser.ConfigParser()
    for path in sum(args.config, []):
        wuji.config.load(config, path)
    for cmd in sum(args.modify, []):
        wuji.config.modify(config, cmd)
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        logging.config.dictConfig(yaml.load(f))
    if args.log is None:
        args.log = wuji.config.digest(config)
    ea = functools.reduce(lambda x, wrap: wrap(x), map(wuji.parse.instance, config.get('ea', 'optimizer').split('\t')))
    root = os.path.expanduser(os.path.expandvars(config.get('model', 'root')))
    os.makedirs(root, exist_ok=True)
    with filelock.FileLock(root + '.lock', 0):
        if args.delete:
            logging.warning('delete model directory: ' + root)
            shutil.rmtree(root, ignore_errors=True)
        os.makedirs(root, exist_ok=True)
    logging.info('cd ' + os.getcwd() + ' && ' + subprocess.list2cmdline([sys.executable] + sys.argv))
    logging.info('sys.path=' + ' '.join(sys.path))
    ray.init(**wuji.ray.init(config))
    timer = {key[5:]: (lambda: False) if value is None else wuji.counter.Time(humanfriendly.parse_timespan(value)) for key, value in vars(args).items() if key.startswith('time_')}
    if isinstance(timer['track'], wuji.counter.Time):
        tracemalloc.start()
    kwargs = {key: value for key, value in vars(args).items() if key not in {'config', 'modify'}}
    kwargs['root'] = root
    try:
        seed = config.getint('config', 'seed')
        wuji.random.seed(seed, prefix=f'seed={seed}: ')
        kwargs['seed'] = seed
    except configparser.NoOptionError:
        pass
    stopper = functools.reduce(lambda x, wrap: wrap(x), map(wuji.parse.instance, config.get('ea', 'stopper').split('\t')))
    with contextlib.closing(ea(config, **kwargs)) as ea, contextlib.closing(stopper(ea)) as stopper, tqdm.tqdm(initial=ea.cost) as pbar, filelock.FileLock(root + '.lock', 0):
        root_log = ea.kwargs['root_log']
        os.makedirs(root_log, exist_ok=True)
        with open(root_log + '.ini', 'w') as f:
            config.write(f)
        logging.info(f'CUDA_VISIBLE_DEVICES= pushd "{os.path.dirname(root_log)}" && tensorboard --logdir {os.path.basename(root_log)}; popd')
        try:
            logging.info(', '.join([key + '=' + humanfriendly.format_size(functools.reduce(lambda x, wrap: wrap(x), map(wuji.parse.instance, encoding['agent']['eval'])).nbytes(random.choice(ea.population)['decision'][key]) if 'agent' in encoding else random.choice(ea.population)['decision'][key].nbytes) for key, encoding in ea.context['encoding'].items()]))
        except:
            traceback.print_exc()
        try:
            while True:
                outcome = ea(pbar=pbar)
                if timer['track']():
                    snapshot = tracemalloc.take_snapshot()
                    stats = snapshot.statistics('lineno')
                    for stat in stats[:10]:
                        print(stat)
                if timer['gc']():
                    gc.collect()
                    logging.warning('gc.collect')
                if stopper(outcome):
                    break
            logging.info('stopped')
        except KeyboardInterrupt:
            logging.warning('keyboard interrupted')
        ea.recorder(force=True)
        path = os.path.join(root, f'{ea.cost}.pth')
        logging.info(path)
        torch.save(ea.__getstate__(), path)
        try:
            wuji.file.tidy(root, config.getint('model', 'keep'))
        except configparser.NoOptionError:
            logging.warning(f'keep all models in {root}')
Code example #4
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.do_stemming = True  # turn off as needed
        self.do_lemmatization = True  # turn off as needed
        self.remove_stopwords = True  # turn off as needed

        import nltk
        nltk_data_path = os.path.join(user_dir(),
                                      config.contrib_env_relative_directory,
                                      "nltk_data")
        nltk_temp_path = os.path.join(user_dir(), "nltk_data")
        nltk.data.path.append(nltk_data_path)
        os.makedirs(nltk_data_path, exist_ok=True)
        nltk_download_lock_file = os.path.join(nltk_data_path, "nltk.lock")
        with filelock.FileLock(nltk_download_lock_file):
            nltk.download('stopwords', download_dir=nltk_data_path)
            nltk.download('punkt', download_dir=nltk_data_path)
            nltk.download('averaged_perceptron_tagger',
                          download_dir=nltk_data_path)
            nltk.download('maxent_treebank_pos_tagger',
                          download_dir=nltk_data_path)
            nltk.download('wordnet', download_dir=nltk_data_path)
            nltk.download('sonoritysequencing', download_dir=nltk_data_path)

        # download resources for stemming if needed
        if self.do_stemming:
            try:
                self.stemmer = nltk.stem.porter.PorterStemmer()
                self.stemmer.stem("test")
            except LookupError:
                os.makedirs(nltk_data_path, exist_ok=True)
                os.makedirs(nltk_temp_path, exist_ok=True)
                tokenizer_path = os.path.join(nltk_data_path, "tokenizers")
                os.makedirs(tokenizer_path, exist_ok=True)
                file1 = download(
                    "https://raw.githubusercontent.com/nltk/nltk_data/gh-pages/packages/tokenizers/punkt.zip",
                    dest_path=nltk_temp_path)
                self.unzip_file(file1, tokenizer_path)
                self.atomic_copy(file1, tokenizer_path)
                self.stemmer = nltk.stem.porter.PorterStemmer()
                self.stemmer.stem("test")

        # download resources for lemmatization if needed
        if self.do_lemmatization:
            try:
                from nltk.corpus import wordnet
                self.lemmatizer = nltk.stem.WordNetLemmatizer()
                self.pos_tagger = nltk.pos_tag
                self.lemmatizer.lemmatize("test", wordnet.NOUN)
                self.pos_tagger("test")
            except LookupError:
                os.makedirs(nltk_data_path, exist_ok=True)
                os.makedirs(nltk_temp_path, exist_ok=True)
                tagger_path = os.path.join(nltk_data_path, "taggers")
                corpora_path = os.path.join(nltk_data_path, "corpora")
                os.makedirs(tagger_path, exist_ok=True)
                os.makedirs(corpora_path, exist_ok=True)
                file1 = download(
                    "https://raw.githubusercontent.com/nltk/nltk_data/gh-pages/packages/taggers/averaged_perceptron_tagger.zip",
                    dest_path=nltk_temp_path)
                file2 = download(
                    "https://raw.githubusercontent.com/nltk/nltk_data/gh-pages/packages/taggers/maxent_treebank_pos_tagger.zip",
                    dest_path=nltk_temp_path)
                file3 = download(
                    "https://raw.githubusercontent.com/nltk/nltk_data/gh-pages/packages/corpora/wordnet.zip",
                    dest_path=nltk_temp_path)
                self.unzip_file(file1, tagger_path)
                self.unzip_file(file2, tagger_path)
                self.unzip_file(file3, corpora_path)
                self.atomic_copy(file1, tagger_path)
                self.atomic_copy(file2, tagger_path)
                self.atomic_copy(file3, corpora_path)
                from nltk.corpus import wordnet
                self.lemmatizer = nltk.stem.WordNetLemmatizer()
                self.pos_tagger = nltk.pos_tag
                self.lemmatizer.lemmatize("test", wordnet.NOUN)
                self.pos_tagger("test")
            self.wordnet_map = {
                "N": wordnet.NOUN,
                "V": wordnet.VERB,
                "J": wordnet.ADJ,
                "R": wordnet.ADV,
                "O": wordnet.NOUN
            }

        # download resources for stopwords if needed
        if self.remove_stopwords:
            try:
                self.stopwords = set(nltk.corpus.stopwords.words('english'))
            except LookupError:
                os.makedirs(nltk_data_path, exist_ok=True)
                os.makedirs(nltk_temp_path, exist_ok=True)
                corpora_path = os.path.join(nltk_data_path, "corpora")
                os.makedirs(corpora_path, exist_ok=True)
                file1 = download(
                    "https://raw.githubusercontent.com/nltk/nltk_data/gh-pages/packages/corpora/stopwords.zip",
                    dest_path=nltk_temp_path)
                self.unzip_file(file1, corpora_path)
                self.atomic_copy(file1, corpora_path)
                self.stopwords = set(nltk.corpus.stopwords.words('english'))
Code example #5
File: tasks.py Project: madjarevicn/whois
def update_from_ldap(server, username, password, schema, pull):
    pid = os.getpid()

    print >> sys.stderr, '[%5d] Acquire lock...' % pid

    lock = filelock.FileLock("/tmp/ldap_update.lock")

    try:
        with lock.acquire(timeout=10):
            print >> sys.stderr, '[%5d] Got lock, now running ldap update...' % pid
            print >> sys.stderr, '[%5d] Server: %s, Username: %s, Schema: %s' % (
                pid, server, username, schema)

            if pull:
                os.environ['LDAP_USERNAME'] = username
                os.environ['LDAP_PASSWORD'] = password
                os.environ['LDAP_SERVER'] = server
                os.environ['LDAP_BASE_DN'] = schema

                command = '/usr/bin/python /app/scripts/ldapdump.py'

                print >> sys.stderr, '[%5d] Execute: %s' % (pid, command)

                ret = os.system(command)

                os.environ['LDAP_USERNAME'] = ''
                os.environ['LDAP_PASSWORD'] = ''
                os.environ['LDAP_SERVER'] = ''
                os.environ['LDAP_BASE_DN'] = ''

                if ret != 0:
                    print >> sys.stderr, '[%5d] Dump failed, aborting' % (pid)
                    return None

            command = '/usr/bin/python /app/scripts/ldapmunge.py'
            print >> sys.stderr, '[%5d] Execute: %s' % (pid, command)
            if 0 != os.system(command):
                print >> sys.stderr, '[%5d] Munge failed, aborting' % (pid)
                return None

            records = json.load(open(USER_JSON_FILENAME))
            total_records = len(records)

            es = Elasticsearch("http://*****:*****@/app/scripts/index.json"'''
            if 0 != os.system(command):
                print >> sys.stderr, '[%5d] Upload of index failed, aborting' % (
                    pid)
                return None

            print >> sys.stderr, "[%5d] Uploading %d indices to elasticsearch..." % (
                pid, total_records)
            for record in records:
                username = record['username']
                es.create(index=INDEX_NAME,
                          doc_type=DOC_TYPE,
                          body=record,
                          id=username)

            print >> sys.stderr, '[%5d] Done.' % pid
    except filelock.Timeout:
        print >> sys.stderr, '[%5d] Failed to acquire lock, skipping task.' % pid

    return None
Code example #6
File: cached_download.py Project: zsaviva/gdown
def cached_download(
    url,
    path=None,
    md5=None,
    quiet=False,
    postprocess=None,
    proxy=None,
    speed=None,
):
    """Cached downlaod from URL.

    Parameters
    ----------
    url: str
        URL. Google Drive URL is also supported.
    path: str, optional
        Output filename. Default is basename of URL.
    md5: str, optional
        Expected MD5 for specified file.
    quiet: bool
        Suppress terminal output. Default is False.
    postprocess: callable
        Function called with the downloaded filename for post-processing.
    proxy: str
        Proxy.
    speed: float
        Download byte size per second (e.g., 256KB/s = 256 * 1024).

    Returns
    -------
    path: str
        Output filename.
    """
    if path is None:
        path = (url.replace("/", "-SLASH-").replace(":", "-COLON-").replace(
            "=", "-EQUAL-").replace("?", "-QUESTION-"))
        path = osp.join(cache_root, path)

    # check existence
    if osp.exists(path) and not md5:
        if not quiet:
            print("File exists: {}".format(path))
        return path
    elif osp.exists(path) and md5:
        try:
            assert_md5sum(path, md5, quiet=quiet)
            return path
        except AssertionError as e:
            print(e, file=sys.stderr)

    # download
    lock_path = osp.join(cache_root, "_dl_lock")
    try:
        os.makedirs(osp.dirname(path))
    except OSError:
        pass
    temp_root = tempfile.mkdtemp(dir=cache_root)
    try:
        temp_path = osp.join(temp_root, "dl")

        if not quiet:
            msg = "Cached Downloading"
            if path:
                msg = "{}: {}".format(msg, path)
            else:
                msg = "{}...".format(msg)
            print(msg, file=sys.stderr)

        download(url, temp_path, quiet=quiet, proxy=proxy, speed=speed)
        with filelock.FileLock(lock_path):
            shutil.move(temp_path, path)
    except Exception:
        shutil.rmtree(temp_root)
        raise

    if md5:
        assert_md5sum(path, md5, quiet=quiet)

    # postprocess
    if postprocess is not None:
        postprocess(path)

    return path
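A call to the function above might look like the following; the URL, checksum, and postprocess callable are placeholder values for illustration only.

# Illustrative call site for cached_download (placeholder values).
path = cached_download(
    url="https://example.com/data/archive.zip",       # placeholder URL
    md5="d41d8cd98f00b204e9800998ecf8427e",            # placeholder checksum
    postprocess=lambda p: print("downloaded to", p),   # any callable taking the path
    quiet=False,
)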
Code example #7
def device_lock(serialno, timeout=3600):
    return filelock.FileLock(device_lock_path(serialno), timeout=timeout)
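A helper like this is usually consumed as a context manager so the device lock is released even on error. A hedged usage sketch, assuming device_lock_path() maps a serial number to a per-device lock file:

import filelock

try:
    with device_lock("emulator-5554", timeout=60):
        pass  # run commands against the locked device here
except filelock.Timeout:
    print("device is busy, giving up")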
Code example #8
            nodbconnection = False
        except:
            ntry += 1
            if (ntry % 5 == 1): print "Failed to make lariat_prd connection for",ntry,"times... sleep for 5 minutes"
            time.sleep(300)
try:
    os.chdir(sys.argv[1])
    datadir=sys.argv[1]
    if (datadir[len(datadir)-1] != '/'): datadir = datadir+'/'
except:
    print "Data directory: ", datadir, "not found - please restart program"
    sys.exit(1)
#
#  check for file lock
#
lock = filelock.FileLock(datadir+"XporterInProgress")
try: 
    lock.acquire(timeout=5)
except filelock.Timeout as err:
    exit(0)
#
# Run file.Complete.py to check for new files
#
fnull = open("/dev/null","w")
subprocess.call([Xporterdir+"/findComplete.sh",datadir])
#
# Begin data loop 
#
switch = True
count = 0
fextension = ".complete"
Code example #9
File: autorclone.py Project: csupxh/AutoRclone-1
# Forcibly kill the rclone subprocesses
def force_kill_rclone_subproc_by_parent_pid(sh_pid):
    if psutil.pid_exists(sh_pid):
        sh_proc = psutil.Process(sh_pid)
        logger.info('Get The Process information - pid: %s, name: %s' %
                    (sh_pid, sh_proc.name()))
        for child_proc in sh_proc.children():
            if child_proc.name().find('rclone') > -1:
                logger.info('Force Killed rclone process which pid: %s' %
                            child_proc.pid)
                child_proc.kill()


if __name__ == '__main__':
    # Single-instance mode ( ̄y▽, ̄)╭
    instance_check = filelock.FileLock(instance_lock_path)
    with instance_check.acquire(timeout=0):
        # Load the account info
        sa_jsons = glob.glob(os.path.join(sa_json_folder, '*.json'))
        if len(sa_jsons) == 0:
            logger.error('No Service Account Credentials JSON file exists.')
            exit(1)

        # Load the instance config
        if os.path.exists(instance_config_path):
            logger.info('Instance config exist, Load it...')
            config_raw = open(instance_config_path).read()
            instance_config = json.loads(config_raw)

        # Check the pid recorded last time
        if 'last_pid' in instance_config:
Code example #10
File: test_pyupdater.py Project: zizle/PyUpdater
    def test_execution_one_file_extract(
        self,
        cleandir,
        shared_datadir,
        simpleserver,
        pyu,
        custom_dir,
        port,
        windowed,
        split_version,
    ):
        data_dir = shared_datadir / "update_repo_extract"
        pyu.setup()

        # We are moving all of the files from the deploy directory to the
        # cwd. We will start a simple http server to use for updates
        with ChDir(data_dir):
            simpleserver.start(port)

            with open("pyu.log", "w") as f:
                f.write("")

            cmd = "python build_onefile_extract.py %s %s %s %s" % (
                custom_dir,
                port,
                windowed,
                split_version,
            )
            os.system(cmd)

            # Moving all files from the deploy directory to the cwd
            # since that is where we will start the simple server
            deploy_dir = os.path.join("pyu-data", "deploy")
            assert os.path.exists(deploy_dir)
            test_cwd = os.getcwd()
            with ChDir(deploy_dir):
                files = os.listdir(os.getcwd())
                for f in files:
                    if f == ".DS_Store":
                        continue
                    shutil.move(f, test_cwd)

            app_name = "Acme"
            if sys.platform == "win32":
                app_name += ".exe"

            app_run_command = app_name
            if sys.platform != "win32":
                app_run_command = "./{}".format(app_name)

            if sys.platform == "darwin" and windowed:
                app_run_command = "./{}.app/Contents/MacOS/{}".format(
                    app_name, app_name)
                app_name = "{}.app".format(app_name)

            if custom_dir:
                # update with custom_dir is multiprocessing-safe
                lock_path = "pyu.lock"
            else:
                if not os.path.exists(appdirs.user_data_dir(APP_NAME)):
                    os.makedirs(appdirs.user_data_dir(APP_NAME))
                lock_path = os.path.join(appdirs.user_data_dir(APP_NAME),
                                         "pyu.lock")

            update_lock = filelock.FileLock(lock_path, LOCK_TIMEOUT)

            output_file = "version1.txt"
            with update_lock.acquire(LOCK_TIMEOUT, 5):
                count = 0
                while count < 5:
                    # Call the binary to self update
                    subprocess.call(app_run_command, shell=True)
                    if os.path.exists(output_file):
                        break
                    count += 1
                    print("Retrying app launch")
                    # Allow enough time for update process to complete.
                    time.sleep(AUTO_UPDATE_PAUSE)

            # Call the binary to ensure it's
            # the updated binary
            subprocess.call(app_run_command, shell=True)

            simpleserver.stop()
            # Detect if it was an overwrite error

            assert os.path.exists(app_name)
            assert os.path.exists(output_file)
            with open(output_file, "r") as f:
                output = f.read().strip()
            assert output == "4.2"

            if os.path.exists(app_name):
                remove_any(app_name)

            if os.path.exists(output_file):
                remove_any(output_file)
Code example #11
File: convert-to-zstd.py Project: marxin/script-misc
#!/usr/bin/env python3

import os
import shutil
import subprocess
import filelock
import sys

from datetime import datetime

extract_location = '/dev/shm/gcc-bisect-bin/'
install_location = '/home/marxin/DATA/gcc-binaries/'

lock = filelock.FileLock('/tmp/gcc_build_binary.lock')

todo = open('/tmp/all').readlines()

for i, line in enumerate(todo):
    revision = line.strip()
    print('%d/%d: %s' % (i, len(todo), revision))

    archive = os.path.join(install_location, revision + '.7z')
    if os.path.exists(archive):
        with lock:
            shutil.rmtree(extract_location, ignore_errors=True)
            os.mkdir(extract_location)
            start = datetime.now()
            size_before = os.path.getsize(archive)
            cmd = '7z x %s -o%s -aoa' % (archive, extract_location)
            subprocess.check_output(cmd, shell=True)
            tarfile = os.path.join(install_location, revision + '.tar')
Code example #12
    if args.ignore_errors:
        ignore_errors = args.ignore_errors
    else:
        try:
            ignore_errors = config["ignore_errors"]
        except KeyError:
            pass
    logger.debug("Ignored projects: {}".format(ignore_errors))

    try:
        os.chdir("/")
    except OSError as e:
        logger.error("cannot change working directory to /", exc_info=True)
        sys.exit(1)

    lock = filelock.FileLock(
        os.path.join(tempfile.gettempdir(), "opengrok-sync.lock"))
    try:
        with lock.acquire(timeout=0):
            pool = Pool(processes=int(args.workers))

            if args.projects:
                dirs_to_process = args.projects
            elif args.indexed:
                indexed_projects = list_indexed_projects(logger, uri)

                if indexed_projects:
                    for line in indexed_projects:
                        dirs_to_process.append(line.strip())
                else:
                    logger.error("cannot get list of projects")
                    sys.exit(1)
Code example #13
def step_over_diffs(result_dir, action, display=True):
    processed = False
    dname = os.path.dirname(iris.tests.__file__)
    lock = filelock.FileLock(os.path.join(dname, _POSTFIX_LOCK))
    if action in ['first', 'last']:
        kind = action
    elif action in ['similar', 'different']:
        kind = 'most {}'.format(action)
    else:
        emsg = 'Unknown action: {!r}'
        raise ValueError(emsg.format(action))
    if display:
        msg = ('\nComparing the {!r} expected image with '
               'the test result image.')
        print(msg.format(kind))

    # Remove old image diff results.
    target = os.path.join(result_dir, '*{}'.format(_POSTFIX_DIFF))
    for fname in glob(target):
        os.remove(fname)

    with lock.acquire(timeout=30):
        # Load the imagerepo.
        repo_fname = os.path.join(dname, _POSTFIX_JSON)
        with open(repo_fname, 'rb') as fi:
            repo = json.load(codecs.getreader('utf-8')(fi))

        # Filter out all non-test result image files.
        target_glob = os.path.join(result_dir, 'result-*.png')
        results = []
        for fname in sorted(glob(target_glob)):
            # We only care about PNG images.
            try:
                im = Image.open(fname)
                if im.format != 'PNG':
                    # Ignore - it's not a png image.
                    continue
            except IOError:
                # Ignore - it's not an image.
                continue
            results.append(fname)

        count = len(results)

        for count_index, result_fname in enumerate(results):
            key = os.path.splitext('-'.join(result_fname.split('-')[1:]))[0]
            try:
                # Calculate the test result perceptual image hash.
                phash = imagehash.phash(Image.open(result_fname),
                                        hash_size=iris.tests._HASH_SIZE)
                uris = repo[key]
                hash_index, distance = _calculate_hit(uris, phash, action)
                uri = uris[hash_index]
            except KeyError:
                wmsg = 'Ignoring unregistered test result {!r}.'
                warnings.warn(wmsg.format(key))
                continue
            with temp_png(key) as expected_fname:
                processed = True
                resource = requests.get(uri)
                if resource.status_code == 200:
                    with open(expected_fname, 'wb') as fo:
                        fo.write(resource.content)
                else:
                    # Perhaps the uri has not been pushed into the repo yet,
                    # so check if a local "developer" copy is available ...
                    local_fname = os.path.join(result_dir,
                                               os.path.basename(uri))
                    if not os.path.isfile(local_fname):
                        emsg = 'Bad URI {!r} for test {!r}.'
                        raise ValueError(emsg.format(uri, key))
                    else:
                        # The temporary expected filename has the test name
                        # baked into it, and is used in the diff plot title.
                        # So copy the local file to the exected file to
                        # maintain this helpfulness.
                        shutil.copy(local_fname, expected_fname)
                try:
                    mcompare.compare_images(expected_fname,
                                            result_fname,
                                            tol=0)
                except Exception as e:
                    if isinstance(e, (ValueError, ImageComparisonFailure)):
                        print('Could not compare {}: {}'.format(
                            result_fname, e))
                        continue
                    else:
                        # Propagate the exception, keeping the stack trace
                        raise
                diff_fname = os.path.splitext(result_fname)[0] + _POSTFIX_DIFF
                args = expected_fname, result_fname, diff_fname
                if display:
                    msg = ('Image {} of {}: hamming distance = {} ' '[{!r}]')
                    status = msg.format(count_index + 1, count, distance, kind)
                    prefix = repo, key, repo_fname, phash, status
                    yield prefix + args
                else:
                    yield args
        if display and not processed:
            print('\nThere are no iris test result images to process.\n')
Code example #14
            self.make_master()
            logging.info("切换状态为MASTER")
        elif self.keepalived == "BACKUP":
            self.make_slave()
            logging.info("切换状态为BACKUP")
        elif self.keepalived == "STOP":
            self.stop_mysql()
            logging.info("切换状态为STOP")
        else:
            logging.error("keepalived配置有误或脚本执行异常")

        self.conn.close()


if __name__ == "__main__":
    lock = filelock.FileLock("/tmp/kps.txt")
    if lock:
        logging.info("ZST Get Lock.start!!!")
    try:
        with lock.acquire(timeout=5):
            pass
    except filelock.Timeout:
        print "timeout"
        logging.warning("get file lock timeout")

    mysql = {
        "url": config.dbhost,
        "port": config.dbport,
        "username": config.dbuser,
        "password": config.dbpassword,
        "dbname": "mysql",
Code example #15
File: build-firmware.py Project: skyformat99/opentx
                'avr-size -A %s | grep Total | cut -f2- -d " "' % target,
                shell=True)
            size = int(size.strip())
        if size > maxsize:
            exit(FIRMWARE_SIZE_TOO_BIG)

    # Copy binary to the binaries directory
    shutil.move(target, path)


if os.path.isfile(errpath):
    print filename
    exit(COMPILATION_ERROR)

if os.path.isfile(path):
    print filename
    exit(0)

lockpath = path + ".lock"
lock = filelock.FileLock(lockpath)
try:
    with lock.acquire(timeout=60 * 60):
        if not os.path.isfile(path):
            build_firmware(path)
except filelock.Timeout:
    print filename
    exit(COMPILATION_ERROR)

print filename
exit(0)
Code example #16
File: wrapper.py Project: IFCA/mirror
        data.setdefault(mirror_name, {
            "description": "",
            "update-frequency": 0,
            "updates": [],
        })
        data[mirror_name]["updates"].append({
            "start_date":
            start_date.strftime("%Y-%m-%d %H:%M:%S"),
            "end_date":
            end_date.strftime("%Y-%m-%d %H:%M:%S"),
            "duration": (end_date - start_date).total_seconds(),
            "status":
            status.returncode,
        })

        data[mirror_name]["updates"] = data[mirror_name]["updates"][-500:]

        with open(MIRROR_DATA, "w") as f:
            yaml.dump(data, f)
            #    open("MIRROR_", "a").write("You were the chosen one.")


try:
    mirror_lock = filelock.FileLock(MIRROR_LOCK)
    with mirror_lock.acquire(timeout=1):
        do_sync(mirror_name, command)
except filelock.Timeout:
    print(f"ERROR: cron job running or lock not released '{MIRROR_LOCK}'")
    sys.exit(1)
Code example #17
def FitnessFunction(point, sample):
    try:
        tmpl = copy.deepcopy(config.RESULTS_TEMPLATE)
        params = point

        paramFile = '/eos/experiment/ship/user/ffedship/EA_V2/Shared/params' + str(
            sample) + '_{}.root'.format(create_id(params))
        geoinfoFile = paramFile.replace('params', 'geoinfo')
        heavy = '/eos/experiment/ship/user/ffedship/EA_V2/Shared/heavy' + str(
            sample) + '_{}'.format(create_id(params))
        lockfile = paramFile + '.lock'
        print heavy, lockfile
        if os.path.exists(geoinfoFile):
            geolockfile = geoinfoFile + '.lock'
            lock = filelock.FileLock(geolockfile)
            if not lock.is_locked:
                with lock:
                    with open(geoinfoFile, 'r') as f:
                        length, weight = map(float,
                                             f.read().strip().split(','))
                    tmpl['weight'] = weight
                    tmpl['length'] = length
        while not os.path.exists(paramFile) and not os.path.exists(heavy):
            lock = filelock.FileLock(lockfile)
            if not lock.is_locked:
                with lock:
                    tmpl['status'] = 'Acquired lock.'
                    tmp_paramFile = generate_geo(
                        paramFile.replace('.r', '.tmp.r'), params)
                    subprocess.call([
                        'python2',
                        '/afs/cern.ch/user/f/ffedship/private/EA_Muon_Shield_V2/get_geo.py',
                        '-g', tmp_paramFile, '-o', geoinfoFile
                    ])

                    shutil.move(
                        '/eos/experiment/ship/user/ffedship/EA_V2/Shared/' +
                        os.path.basename(tmp_paramFile),
                        paramFile.replace('shared',
                                          'output').replace('params', 'geo'))
                    with open(geoinfoFile, 'r') as f:
                        length, weight = map(float,
                                             f.read().strip().split(','))
                    tmpl['weight'] = weight
                    tmpl['length'] = length
                    shutil.move(
                        '/eos/experiment/ship/user/ffedship/EA_V2/Geometry/' +
                        os.path.basename(tmp_paramFile), paramFile)

                    tmpl['status'] = 'Created geometry.'
                    print "Fitness Function Message: Geometry has been generated using config ", point
                    print "Fitness Function Message: Length ", length
                    print "Fitness Function Message: Weight ", weight
            else:
                sleep(60)
        outFile = root_output_name

        tmpl['status'] = 'Simulating...'
        generate(inputFile=root_input_name,
                 paramFile=paramFile,
                 outFile=root_output_name,
                 seed=1,
                 nEvents=10000)

        tmpl['status'] = 'Analysing...'
        chain = r.TChain('cbmsim')
        chain.Add(outFile)
        xs = analyse(chain, 'hists.root')
        tmpl['muons'] = len(xs)
        tmpl['muons_w'] = sum(xs)
        print "muons: ", tmpl['muons']
        print "muons_w: ", tmpl['muons_w']
        print "Fitness", FCN(tmpl['weight'], np.array(xs), tmpl['length'])[0]
        XS_output = open(csv_output_name, "w")
        XS_write = csv.writer(XS_output)
        XS_write.writerow([tmpl['weight'], tmpl['length'], tmpl['muons_w']])
        XS_output.close()
        tmpl['error'] = None
        tmpl['status'] = 'Done.'
        os.remove(root_output_name)
    except:
        print "EA_LL_FCN Message: Wrong geometry, operation rejected, negative values assigned"
        XS_output = open(csv_output_name, "w")
        XS_write = csv.writer(XS_output)
        XS_write.writerow([100000000, 10000000, 100000000])
        XS_output.close()
Code example #18
    def __init__(self, lockname):
        super(CrdsFileLock, self).__init__(config.get_crds_lockpath(lockname))
        self._lock = filelock.FileLock(self.lockname)
Code example #19
import base58
import filelock
import json

with filelock.FileLock("key.lock", timeout=10):
    try:
        with open("key.txt", "r") as file:
            key_list = json.load(file)
    except:
        key_list = []

with filelock.FileLock("block.lock", timeout=10):
    try:
        with open("block.txt", "r") as file:
            block_list = json.load(file)
    except:
        block_list = []

old_in = []
old_out = []
for block in block_list:
    for tx in block["tx"]:
        old_in.append(tx["in"])
        old_out.append(tx["out"])

unspent = []
unused = []
for key in key_list:
    key_hex = base58.b58decode(key["public"]).hex()
    if key_hex not in old_in:
        if key_hex in old_out:
Code example #20
import json
import os
import logging
import filelock
import tempfile

from django.utils.functional import SimpleLazyObject

logger = logging.getLogger(__name__)

dirname = os.path.dirname(os.path.abspath(__file__))
GatesJsonFile = os.path.join(dirname, 'gates_lizhi.json')
GatesPyFile = os.path.join(dirname, 'biogate.py')

gates_updating_lock = filelock.FileLock(
    os.path.join(tempfile.gettempdir(),
                 'biohub.biocircuit.gates.updating.lock'))


def get_d_gate(lizhi):
    """Get d_gate from gates_lizhi.json.

    Parameters
    ----------
    lizhi : dict
        A dict parsed from gates_lizhi.json

    Returns
    -------
    d_gate_d : dict
        Record the 4 parameters.
Code example #21
    def _save(self, name, content):
        full_path = self.path(name)

        # Create any intermediate directories that do not exist.
        # Note that there is a race between os.path.exists and os.makedirs:
        # if os.makedirs fails with EEXIST, the directory was created
        # concurrently, and we can continue normally. Refs #16082.
        directory = os.path.dirname(full_path)
        if not os.path.exists(directory):
            try:
                if self.directory_permissions_mode is not None:
                    # os.makedirs applies the global umask, so we reset it,
                    # for consistency with file_permissions_mode behavior.
                    old_umask = os.umask(0)
                    try:
                        os.makedirs(directory, self.directory_permissions_mode)
                    finally:
                        os.umask(old_umask)
                else:
                    os.makedirs(directory)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise
        if not os.path.isdir(directory):
            raise IOError("%s exists and is not a directory." % directory)

        # There's a potential race condition between get_available_name and
        # saving the file; it's possible that two threads might return the
        # same name, at which point all sorts of fun happens. So we need to
        # try to create the file, but if it already exists we have to go back
        # to get_available_name() and try again.

        while True:
            try:
                # This file has a file path that we can move.
                if hasattr(content, 'temporary_file_path'):
                    # file_move_safe(content.temporary_file_path(), full_path)
                    pass  # TODO: move temporary file

                # This is a normal uploadedfile that we can stream.
                else:
                    # This fun binary flag incantation makes os.open throw an
                    # OSError if the file already exists before we open it.
                    flags = (os.O_WRONLY | os.O_CREAT | os.O_EXCL
                             | getattr(os, 'O_BINARY', 0))
                    # The current umask value is masked out by os.open!
                    fd = os.open(full_path, flags, 0o666)
                    _file = None
                    try:
                        lock = filelock.FileLock(full_path)
                        with lock.acquire():
                            file = io.FileIO(fd, 'wb', closefd=False)
                            r = content.read(4096)
                            while r:
                                file.write(r)
                                r = content.read(4096)
                            # for chunk in content.chunks():
                            #     if _file is None:
                            #         mode = 'wb' if isinstance(chunk, bytes) else 'wt'
                            #         _file = os.fdopen(fd, mode)
                            #     _file.write(chunk)
                    finally:
                        if _file is not None:
                            _file.close()
                        else:
                            os.close(fd)
            except OSError as e:
                if e.errno == errno.EEXIST:
                    # Ooops, the file exists. We need a new file name.
                    name = self.get_available_name(name)
                    full_path = self.path(name)
                else:
                    raise
            else:
                # OK, the file save worked. Break out of the loop.
                break

        if self.file_permissions_mode is not None:
            os.chmod(full_path, self.file_permissions_mode)

        # Store filenames with forward slashes, even on Windows.
        return str(name.replace('\\', '/'))
Code example #22
File: raiden_service.py Project: tokenbase/raiden
    def __init__(
        self,
        chain: BlockChainService,
        query_start_block: typing.BlockNumber,
        default_registry: TokenNetworkRegistry,
        default_secret_registry: SecretRegistry,
        private_key_bin,
        transport,
        raiden_event_handler: 'RaidenEventHandler',
        config,
        discovery=None,
    ):
        super().__init__()
        if not isinstance(private_key_bin,
                          bytes) or len(private_key_bin) != 32:
            raise ValueError('invalid private_key')

        self.tokennetworkids_to_connectionmanagers = dict()
        self.identifier_to_results: typing.Dict[typing.PaymentID,
                                                AsyncResult, ] = dict()

        self.chain: BlockChainService = chain
        self.default_registry = default_registry
        self.query_start_block = query_start_block
        self.default_secret_registry = default_secret_registry
        self.config = config
        self.privkey = private_key_bin
        self.address = privatekey_to_address(private_key_bin)
        self.discovery = discovery

        self.private_key = PrivateKey(private_key_bin)
        self.pubkey = self.private_key.public_key.format(compressed=False)
        self.transport = transport

        self.blockchain_events = BlockchainEvents()
        self.alarm = AlarmTask(chain)
        self.raiden_event_handler = raiden_event_handler

        self.stop_event = Event()
        self.stop_event.set()  # inits as stopped

        self.wal = None
        self.snapshot_group = 0

        # This flag will be used to prevent the service from processing
        # state changes events until we know that pending transactions
        # have been dispatched.
        self.dispatch_events_lock = Semaphore(1)

        self.database_path = config['database_path']
        if self.database_path != ':memory:':
            database_dir = os.path.dirname(config['database_path'])
            os.makedirs(database_dir, exist_ok=True)

            self.database_dir = database_dir
            # Prevent concurrent access to the same db
            self.lock_file = os.path.join(self.database_dir, '.lock')
            self.db_lock = filelock.FileLock(self.lock_file)
        else:
            self.database_path = ':memory:'
            self.database_dir = None
            self.lock_file = None
            self.serialization_file = None
            self.db_lock = None

        self.event_poll_lock = gevent.lock.Semaphore()
Code example #23
File: raiden_service.py Project: hellosugoi/raiden
    def __init__(
        self,
        chain: BlockChainService,
        query_start_block: typing.BlockNumber,
        default_registry: TokenNetworkRegistry,
        default_secret_registry: SecretRegistry,
        private_key_bin,
        transport,
        config,
        discovery=None,
    ):
        if not isinstance(private_key_bin,
                          bytes) or len(private_key_bin) != 32:
            raise ValueError('invalid private_key')

        self.tokennetworkids_to_connectionmanagers = dict()
        self.identifier_to_results = defaultdict(list)

        self.chain: BlockChainService = chain
        self.default_registry = default_registry
        self.query_start_block = query_start_block
        self.default_secret_registry = default_secret_registry
        self.config = config
        self.privkey = private_key_bin
        self.address = privatekey_to_address(private_key_bin)
        self.discovery = discovery

        if config['transport_type'] == 'udp':
            endpoint_registration_event = gevent.spawn(
                discovery.register,
                self.address,
                config['external_ip'],
                config['external_port'],
            )
            endpoint_registration_event.link_exception(
                endpoint_registry_exception_handler)

        self.private_key = PrivateKey(private_key_bin)
        self.pubkey = self.private_key.public_key.format(compressed=False)
        self.transport = transport

        self.blockchain_events = BlockchainEvents()
        self.alarm = AlarmTask(chain)
        self.shutdown_timeout = config['shutdown_timeout']
        self.stop_event = Event()
        self.start_event = Event()
        self.chain.client.inject_stop_event(self.stop_event)

        self.wal = None

        self.database_path = config['database_path']
        if self.database_path != ':memory:':
            database_dir = os.path.dirname(config['database_path'])
            os.makedirs(database_dir, exist_ok=True)

            self.database_dir = database_dir
            # Prevent concurrent access to the same db
            self.lock_file = os.path.join(self.database_dir, '.lock')
            self.db_lock = filelock.FileLock(self.lock_file)
        else:
            self.database_path = ':memory:'
            self.database_dir = None
            self.lock_file = None
            self.serialization_file = None
            self.db_lock = None

        if config['transport_type'] == 'udp':
            # If the endpoint registration fails the node will quit, this must
            # finish before starting the transport
            endpoint_registration_event.join()

        self.event_poll_lock = gevent.lock.Semaphore()

        self.start()
Code example #24
    def lock(self, key: str, timeout: Union[int, float] = 5) -> None:
        """ Lock for the provided key """

        with filelock.FileLock(self.get_file_path(key) + ".lock",
                               timeout=timeout):
            yield
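The yield inside the with block only works as a context manager if the method is wrapped with contextlib.contextmanager (the decorator sits outside this snippet). A self-contained sketch of the same idea, with get_file_path standing in for whatever path scheme the class actually uses:

import contextlib
from typing import Iterator, Union

import filelock


class FileStore:
    def get_file_path(self, key: str) -> str:
        return "/tmp/store-" + key  # hypothetical per-key path

    @contextlib.contextmanager
    def lock(self, key: str, timeout: Union[int, float] = 5) -> Iterator[None]:
        # Hold a per-key lock for the duration of the with block.
        with filelock.FileLock(self.get_file_path(key) + ".lock",
                               timeout=timeout):
            yield


# Usage: with FileStore().lock("users"): ...critical section...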
Code example #25
def add_local_prototype(prototypename):
    AUTHOR_ = 'minemeld-web'
    DESCRIPTION_ = 'Local prototype library managed via MineMeld WebUI'

    try:
        library_path, prototype = _local_library_path(prototypename)

    except ValueError as e:
        return jsonify(error={'message': str(e)}), 400

    lock = filelock.FileLock('{}.lock'.format(library_path))
    with lock.acquire(timeout=10):
        if os.path.isfile(library_path):
            with open(library_path, 'r') as f:
                library_contents = yaml.safe_load(f)
            if not isinstance(library_contents, dict):
                library_contents = {}
            if 'description' not in library_contents:
                library_contents['description'] = DESCRIPTION_
            if 'prototypes' not in library_contents:
                library_contents['prototypes'] = {}
            if 'author' not in library_contents:
                library_contents['author'] = AUTHOR_
        else:
            library_contents = {
                'author': AUTHOR_,
                'description': DESCRIPTION_,
                'prototypes': {}
            }

        try:
            incoming_prototype = request.get_json()
        except Exception as e:
            return jsonify(error={'message': str(e)}), 400

        new_prototype = {
            'class': incoming_prototype['class'],
        }

        if 'config' in incoming_prototype:
            try:
                new_prototype['config'] = yaml.safe_load(
                    incoming_prototype['config']
                )
            except Exception as e:
                return jsonify(error={'message': 'invalid YAML in config'}), 400

        if 'developmentStatus' in incoming_prototype:
            new_prototype['development_status'] = \
                incoming_prototype['developmentStatus']

        if 'nodeType' in incoming_prototype:
            new_prototype['node_type'] = incoming_prototype['nodeType']

        if 'description' in incoming_prototype:
            new_prototype['description'] = incoming_prototype['description']

        if 'indicatorTypes' in incoming_prototype:
            new_prototype['indicator_types'] = incoming_prototype['indicatorTypes']

        if 'tags' in incoming_prototype:
            new_prototype['tags'] = incoming_prototype['tags']

        library_contents['prototypes'][prototype] = new_prototype

        with open(library_path, 'w') as f:
            yaml.safe_dump(library_contents, f, indent=4, default_flow_style=False)

    return jsonify(result='OK'), 200
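Stripped of the Flask plumbing, the endpoint above is a read-modify-write of a YAML file guarded by a lock. A minimal sketch of that core pattern (paths and the entry shape are examples, not the MineMeld schema):

import filelock
import yaml


def add_entry(library_path, name, entry):
    # Serialize concurrent writers on the same library file.
    with filelock.FileLock(library_path + '.lock', timeout=10):
        try:
            with open(library_path) as f:
                contents = yaml.safe_load(f) or {}
        except FileNotFoundError:
            contents = {}
        contents.setdefault('prototypes', {})[name] = entry
        with open(library_path, 'w') as f:
            yaml.safe_dump(contents, f, default_flow_style=False)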
Code example #26
DIFFICULTY = 4

try:
    shutil.copy('trans.txt', 'peer_trans.txt')
except:
    pass

dir = os.path.dirname(os.path.abspath(__file__))
try:
    with open(os.path.join(dir, 'peer.txt'), 'r') as file:
        peer_list = json.load(file)
except:
    peer_list = []

with filelock.FileLock('block.lock', timeout=10):
    try:
        with open('block.txt', 'r') as file:
            block_list = json.load(file)
    except:
        block_list = []

for peer in peer_list:
    url = 'http://' + peer + '/block.txt'
    try:
        with urllib.request.urlopen(url) as file:
            peer_block_list = json.load(file)
    except:
        peer_block_list = []

    if len(block_list) < len(peer_block_list):
Code example #27
import asyncio
from Blogbot.IO import readData, getBlogProgress, appendBlogProgress, blogfilename, progressfilename
import telebot
import Blogbot.Scraper as Scraper
import Blogbot.telegram as telegram
import filelock

lock = filelock.FileLock(progressfilename + '.lock')


async def main():
    telegram.send_text(telegram.logchat_id, 'Start searching Blogs')
    print('Start searching Blogs')
    tasks = [
        (asyncio.create_task(Scraper.Nogi(2)), '乃木坂46'),
        # (asyncio.create_task(Scraper.Keya()), "欅坂46"),
        (asyncio.create_task(Scraper.Saku()), "櫻坂46"),
        (asyncio.create_task(Scraper.Hina()), '日向坂46')
    ]
    for task, group in tasks:
        for blog in await task:
            if blog.url not in getBlogProgress():
                blobs = asyncio.create_task(blog.getImgBlobs())
                print(blog.title, blog.name, blog.url)
                telegram.send_text(telegram.logchat_id,
                                   blog,
                                   disable_web_page_preview=True)
                if blog.name != "運営スタッフ":
                    # telegram.send_text(telegram.logchat_id, f'#{blog.name} 「{blog.title}」\n\n{blog.url}',
                    #                       disable_web_page_preview=True)
                    # for batch in await blobs:
Code example #28
File: __init__.py Project: ebostijancic/integration
import filelock
from mendertesting import MenderTesting

artifact_lock = filelock.FileLock("artifact_modification")
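A module-level lock like this is shared simply by importing it; any test that mutates the artifact can then serialize on it. A hedged usage sketch (the import location and test body are illustrative only):

# Hypothetical consumer of the module-level lock above.
from tests import artifact_lock  # assumed import location


def test_modify_artifact():
    with artifact_lock:
        pass  # mutate the shared artifact here, serialized across workers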
Code example #29
File: build.py Project: Quansight/conda-store
def build_conda_environment(conda_store, build):
    """Build a conda environment with set uid/gid/and permissions and
    symlink the build to a named environment

    """
    set_build_started(conda_store, build)

    conda_prefix = build.build_path(conda_store)
    os.makedirs(os.path.dirname(conda_prefix), exist_ok=True)

    environment_prefix = build.environment_path(conda_store)
    os.makedirs(os.path.dirname(environment_prefix), exist_ok=True)

    conda_store.log.info(f"building conda environment={conda_prefix}")

    try:
        with utils.timer(conda_store.log, f"building {conda_prefix}"):
            with tempfile.TemporaryDirectory() as tmpdir:
                tmp_environment_filename = os.path.join(tmpdir, "environment.yaml")
                with open(tmp_environment_filename, "w") as f:
                    yaml.dump(build.specification.spec, f)
                    if conda_store.serialize_builds:
                        with filelock.FileLock(
                            os.path.join(tempfile.tempdir, "conda-store.lock")
                        ):
                            output = build_environment(
                                conda_store.conda_command,
                                tmp_environment_filename,
                                conda_prefix,
                            )
                    else:
                        output = build_environment(
                            conda_store.conda_command,
                            tmp_environment_filename,
                            conda_prefix,
                        )

        utils.symlink(conda_prefix, environment_prefix)

        # modify permissions, uid, gid if they do not match
        stat_info = os.stat(conda_prefix)
        permissions = conda_store.default_permissions
        uid = conda_store.default_uid
        gid = conda_store.default_gid

        if permissions is not None and oct(stat.S_IMODE(stat_info.st_mode))[-3:] != str(
            permissions
        ):
            conda_store.log.info(
                f"modifying permissions of {conda_prefix} to permissions={permissions}"
            )
            with utils.timer(conda_store.log, f"chmod of {conda_prefix}"):
                utils.chmod(conda_prefix, permissions)

        if (
            uid is not None
            and gid is not None
            and (str(uid) != str(stat_info.st_uid) or str(gid) != str(stat_info.st_gid))
        ):
            conda_store.log.info(
                f"modifying permissions of {conda_prefix} to uid={uid} and gid={gid}"
            )
            with utils.timer(conda_store.log, f"chown of {conda_prefix}"):
                utils.chown(conda_prefix, uid, gid)

        packages = conda.conda_prefix_packages(conda_prefix)
        build.size = utils.disk_usage(conda_prefix)

        set_build_completed(conda_store, build, output.encode("utf-8"), packages)
    except subprocess.CalledProcessError as e:
        conda_store.log.exception(e)
        set_build_failed(conda_store, build, e.output.encode("utf-8"))
        raise e
    except Exception as e:
        conda_store.log.exception(e)
        set_build_failed(conda_store, build, traceback.format_exc().encode("utf-8"))
        raise e
Code example #30
File: mirror.py Project: lk27748772/opengrok
        #
        # Technically, adding a handler to the logger is not necessary
        # since log rotation is done above using doRollover() however
        # it is done anyway in case the handler changes to use implicit
        # rotation in the future.
        #
        logger.addHandler(handler)

    # We want this to be logged to the log file (if any).
    if project_config:
        if project_config.get('disabled'):
            logger.info("Project {} disabled, exiting".
                        format(args.project))
            sys.exit(2)

    lock = filelock.FileLock(os.path.join(tempfile.gettempdir(),
                             args.project + "-mirror.lock"))
    try:
        with lock.acquire(timeout=0):
            proxy = config.get('proxy') if use_proxy else None
            if prehook:
                logger.info("Running pre hook")
                if run_hook(logger, prehook,
                            os.path.join(source_root, args.project), proxy,
                            hook_timeout) != 0:
                    logger.error("pre hook failed")
                    logging.shutdown()
                    sys.exit(1)

            #
            # If one of the repositories fails to sync, the whole project sync
            # is treated as failed, i.e. the program will return 1.