# PyCoral loader: pick the correct binary module for the current platform
# and re-export its contents at package level.
import sys
if sys.platform == 'linux2':
    # NOTE(review): 'linux2' is the Python 2 platform tag for Linux; under
    # Python 3 sys.platform is 'linux', so this branch would not be taken.
    # Load the extension with RTLD_NOW|RTLD_GLOBAL so its symbols are visible
    # to other extension modules (DLFCN is a Python 2 stdlib module).
    import DLFCN
    sys.setdlopenflags(DLFCN.RTLD_NOW | DLFCN.RTLD_GLOBAL)
    from liblcg_PyCoral import *
    import liblcg_PyCoral
    __doc__ = liblcg_PyCoral.__doc__
    del liblcg_PyCoral
    del DLFCN
else:
    if sys.platform == 'darwin':
        # macOS: no dlopen-flag adjustment required.
        from liblcg_PyCoral import *
        import liblcg_PyCoral
        __doc__ = liblcg_PyCoral.__doc__
        del liblcg_PyCoral
    else:
        # Other platforms use the lcg_PyCoral module name.
        from lcg_PyCoral import *
        import lcg_PyCoral
        __doc__ = lcg_PyCoral.__doc__
        del lcg_PyCoral
del sys
""" A filter (in the sense of Unix, not DSP) for audio files, using Pd. The idea is to read a wav file (either from stdin or from a file given with -i), send it into a Pd patch (given by the file name and an option path) and to write the result to stdin or to a file given with -o. """ try: # On Linux dlopenflags need to be set for Pd plugin loading to work. import DLFCN import sys sys.setdlopenflags(DLFCN.RTLD_LAZY | DLFCN.RTLD_GLOBAL) except ImportError: pass import wave import pylibpd def pdfilter(inp, outp, patch, folder='.'): inw = wave.open(inp, 'rb') outw = wave.open(outp, 'wb') try: w = inw.getsampwidth() if w != 2: raise Exception('wrong sample width') ch = inw.getnchannels() sr = inw.getframerate() n = inw.getnframes() outw.setsampwidth(w) outw.setnchannels(ch) outw.setframerate(sr)
"""Tests of utilities supporting export to SavedModel.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import sys import tempfile import time # pylint: disable=g-import-not-at-top # TODO(jart): #6568 Remove this hack that makes dlopen() not crash. if hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags"): import ctypes sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL) from tensorflow.contrib.layers.python.layers import feature_column as fc from tensorflow.contrib.learn.python.learn import export_strategy as export_strategy_lib from tensorflow.contrib.learn.python.learn.estimators import constants from tensorflow.contrib.learn.python.learn.estimators import model_fn from tensorflow.contrib.learn.python.learn.utils import input_fn_utils from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils from tensorflow.core.framework import tensor_shape_pb2 from tensorflow.core.framework import types_pb2 from tensorflow.core.protobuf import meta_graph_pb2 from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.ops import array_ops from tensorflow.python.platform import gfile from tensorflow.python.platform import test
#!/usr/bin/env python3 # package file for fract4d import sys import os sys.setdlopenflags(os.RTLD_GLOBAL | os.RTLD_NOW)
#!/usr/bin/env python import mobilec import os import sys sys.setdlopenflags(0x100 | 0x2) from ctypes import * libmc = CDLL("libmc.so") agencyoptions = mobilec.AgencyOptions() ch_options = mobilec.ChOptions(chhome=os.environ['CHHOME']) agencyoptions.ch_options = ch_options agency = mobilec.Agency(port=5051, options=agencyoptions) raw_input('Press any key to quit the server...')
# This is a mock version of torch for use with `rtld_global_warning_test`, # simulating the following line: # https://github.com/pytorch/pytorch/blob/v1.0.0/torch/__init__.py#L75 import os as _dl_flags import sys # Make the check in `pydrake/__init__.py` pass, but then undo the change. _old_flags = sys.getdlopenflags() sys.setdlopenflags(_dl_flags.RTLD_GLOBAL) sys.setdlopenflags(_old_flags)
# Serial-mode loader for the mfem Python wrappers.
import mfem

if mfem.mfem_mode is None:
    mfem.mfem_mode = 'serial'
if mfem.mfem_mode == 'parallel':
    # Serial and parallel wrappers cannot coexist in one process.
    raise ImportError("MFEM parallel mode is already loaded")
debug_print = mfem.debug_print

import sys, ctypes

## libmfem.a is linked only with _array.so
## this makes sure that symbols are resolved
# NOTE(review): despite its name, rtld_now holds the *previous* dlopen
# flags returned by sys.getdlopenflags(), not the RTLD_NOW constant.
rtld_now = sys.getdlopenflags()
sys.setdlopenflags(ctypes.RTLD_GLOBAL | sys.getdlopenflags())

# Re-export the wrapped submodules at package level.
from array import *
from point import *
from segment import *
from common_functions import *
from socketstream import *
from operators import *
from blockoperator import *
from blockvector import *
from blockmatrix import *
from coefficient import *
from lininteg import *
from mesh import *
from fe_coll import *
from vector import *
from fespace import *
from linearform import *
"""Tests for rosbag2_py pybind option structs (QoS override conversions)."""
from pathlib import Path
import os
import sys
import threading

from common import get_rosbag_options, wait_for

import rclpy
from rclpy.qos import QoSProfile
import rosbag2_py
from std_msgs.msg import String

# Fix: `os` is used below (os.environ / os.RTLD_*) but was never imported;
# `import os` has been added to the stdlib import group above.
if os.environ.get('ROSBAG2_PY_TEST_WITH_RTLD_GLOBAL', None) is not None:
    # This is needed on Linux when compiling with clang/libc++.
    # TL;DR This makes class_loader work when using a python extension
    # compiled with libc++.
    #
    # For the fun RTTI ABI details, see
    # https://whatofhow.wordpress.com/2015/03/17/odr-rtti-dso/.
    sys.setdlopenflags(os.RTLD_GLOBAL | os.RTLD_LAZY)


def test_options_qos_conversion():
    """Check that QoS override dicts survive the Python -> C++ -> Python
    round trip on both PlayOptions and RecordOptions pybind structs."""
    simple_overrides = {'/topic': QoSProfile(depth=10)}

    play_options = rosbag2_py.PlayOptions()
    play_options.topic_qos_profile_overrides = simple_overrides
    assert play_options.topic_qos_profile_overrides == simple_overrides

    record_options = rosbag2_py.RecordOptions()
    record_options.topic_qos_profile_overrides = simple_overrides
    assert record_options.topic_qos_profile_overrides == simple_overrides
import ctypes
import inspect
import sys
import traceback

# go/tf-wildcard-import
# pylint: disable=wildcard-import,g-bad-import-order,g-import-not-at-top

# pywrap_tensorflow is a SWIG generated python library that dynamically loads
# _pywrap_tensorflow.so. The default mode for loading keeps all the symbols
# private and not visible to other libraries that may be loaded. Setting
# the mode to RTLD_GLOBAL makes the symbols visible, so libraries such
# as the ones implementing custom ops can have access to tensorflow
# framework's symbols.
_default_dlopen_flags = sys.getdlopenflags()
sys.setdlopenflags(_default_dlopen_flags | ctypes.RTLD_GLOBAL)
from tensorflow.python import pywrap_tensorflow
# Restore the default dlopen flags now that the core extension is loaded.
sys.setdlopenflags(_default_dlopen_flags)

try:
    from tensorflow.core.framework.graph_pb2 import *
except ImportError:
    # A failing protobuf import usually means the interpreter was launched
    # from inside the source tree; raise a message explaining that.
    msg = """%s\n\nError importing tensorflow. Unless you are using bazel, you should not try to import tensorflow from its source directory; please exit the tensorflow source tree, and relaunch your python interpreter from there.""" % traceback.format_exc()
    raise ImportError(msg)

from tensorflow.core.framework.summary_pb2 import *
from tensorflow.core.framework.attr_value_pb2 import *
from tensorflow.core.protobuf.config_pb2 import *
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
from . import pivy_meta

__version__ = pivy_meta.__version__
__all__ = ['coin']

# Merge RTLD_GLOBAL into the interpreter's dlopen flags so Coin can use
# dynamic loading; the `dl` module is not available everywhere, so this is
# strictly best-effort.
try:
    import sys
    import dl
    sys.setdlopenflags(dl.RTLD_GLOBAL | sys.getdlopenflags())
except Exception:
    pass

# initialize the Coin system
from .coin import SoDB, SoNodeKit, SoInteraction

SoDB.init()
SoNodeKit.init()
SoInteraction.init()
def import_scope(flags):
    """Generator used to scope a temporary change of the dlopen flags.

    On first ``next()`` the interpreter's dlopen flags are set to *flags*
    and the previous flags are remembered; when the generator finishes (or
    is closed/thrown into), the previous flags are restored.

    Fix: the restore now runs in a ``finally`` block, so the original flags
    are reinstated even if the code executing between the two resumptions
    raises — previously an exception left the temporary flags active.
    """
    prev = sys.getdlopenflags()
    sys.setdlopenflags(flags)
    try:
        yield
    finally:
        # Always restore, including on exception/close.
        sys.setdlopenflags(prev)
def getGoodBRuns():
    """Return the run numbers whose magnet (B field) current lies within
    [minI, maxI], read from the CMS run-info condition database.

    Python 2 code. Relies on module-level globals: options, v (verbosity),
    minI, maxI, DLFCN, FWIncantation, RDBMS and the iovInspector module
    (imported as `inspect`).
    """
    runs_b_on = []
    # Export CORAL/plugin symbols globally before touching the DB.
    sys.setdlopenflags(DLFCN.RTLD_GLOBAL + DLFCN.RTLD_LAZY)
    a = FWIncantation()
    #os.putenv("CORAL_AUTH_PATH","/afs/cern.ch/cms/DB/conddb")
    rdbms = RDBMS("/afs/cern.ch/cms/DB/conddb")
    db = rdbms.getDB(options.dbName)
    tags = db.allTags()
    if options.printTags:
        print "\nOverview of all tags in " + options.dbName + " :\n"
        print tags
        print "\n"
        sys.exit()
    # for inspecting last run after run has started
    #tag = 'runinfo_31X_hlt'
    tag = options.dbTag
    # for inspecting last run after run has stopped
    #tag = 'runinfo_test'
    try:
        #log = db.lastLogEntry(tag)
        #for printing all log info present into log db
        #print log.getState()
        iov = inspect.Iov(db, tag)
        #print "########overview of tag "+tag+"########"
        #print iov.list()
        if v > 1:
            print "######## summries ########"
            for x in iov.summaries():
                print x[0], x[1], x[2], x[3]
        what = {}
        if v > 1:
            print "###(start_current,stop_current,avg_current,max_current,min_current,run_interval_micros) vs runnumber###"
            print iov.trend(what)
        if v > 0:
            print "######## trends ########"
        # Scan the requested run range; per the trend header above,
        # x[2][3] is max_current and x[2][4] is min_current for run x[0].
        for x in iov.trendinrange(what, options.startRun - 1, options.endRun + 1):
            if v > 0 or x[0] == 67647 or x[0] == 66893 or x[0] == 67264:
                print x[0], x[1], x[2], x[2][4], x[2][3]
                #print x[0],x[1] ,x[2], x[2][4], timeStamptoUTC(x[2][6]), timeStamptoUTC(x[2][7])
            # Keep the run when its current stayed within [minI, maxI].
            if x[2][4] >= minI and x[2][3] <= maxI:
                runs_b_on.append(int(x[0]))
    except Exception as er:
        print er
    print "### runs with good B field ###"
    print runs_b_on
    return runs_b_on
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #GNU Lesser General Public License for more details. #You should have received a copy of the GNU Lesser General Public License #along with M3. If not, see <http://www.gnu.org/licenses/>. import time import xmlrpclib import SocketServer import SimpleXMLRPCServer import sys import ctypes from m3.toolbox_core import M3Exception flags = sys.getdlopenflags() sys.setdlopenflags(flags | ctypes.RTLD_GLOBAL) #allow exceptions to be passed between dll's import m3.m3rt_system import socket import m3.rt_proxy as m3p import m3.toolbox_core as m3t from threading import Thread from threading import Event ## Handle Ctrl+c even though ros is launched import signal stop_signal=Event() def stop_program(signal, frame): stop_signal.set() class MyTCPServer(SocketServer.TCPServer): def server_bind(self): self.socket.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
Underworld is an open-source project.
"""
# first import h5py. this is due to the dlopen changes which follow.
# by importing h5py, we ensure it uses the libHDF5 it was built against
# as opposed to version PETSc is possibly built against.
import h5py as _h5py

# ok, now need to change default python dlopen flags to global
# this is because when python imports the module, the shared libraries are loaded as RTLD_LOCAL
# and then when MPI_Init is called, OpenMPI tries to dlopen its plugin, they are unable to
# link to the openmpi libraries as they are private
import sys as _sys
import ctypes as _ctypes
_oldflags = _sys.getdlopenflags()
_sys.setdlopenflags( _oldflags | _ctypes.RTLD_GLOBAL )

__version__ = "2.8.0b"

# squelch h5py/numpy future warnings
import warnings as _warnings
_warnings.simplefilter(action='ignore', category=FutureWarning)
# also these warnings as they are very noisey and not necessary
# https://stackoverflow.com/questions/40845304/runtimewarning-numpy-dtype-size-changed-may-indicate-binary-incompatibility
_warnings.filterwarnings("ignore", message="numpy.dtype size changed")
_warnings.filterwarnings("ignore", message="numpy.ufunc size changed")

# DEPRECATE
# let's check PYTHONPATH includes path to lib directory, as this will probably
# catch people out.
import os as _os
# The dl module seems to be missing on 64-bit platforms.
# Since RTLD_NOW==0x002 and RTLD_GLOBAL==0x100 very commonly
# we will just guess that the proper flags are 0x102 when there
# is no dl module.
try:
    import dl
    newflags = dl.RTLD_NOW | dl.RTLD_GLOBAL
    # Fixed: this message previously said "import dl failed" even though it
    # is logged on the success path.
    logRecExOnline_globalconfig.info("import dl succeeded, newflags=%s" % newflags)
except ImportError:
    # No dl module, so guess (see above). Narrowed from a bare `except:` so
    # unrelated errors (e.g. KeyboardInterrupt) are not swallowed.
    newflags = 0x102
    logRecExOnline_globalconfig.info("proceeding with preconfigured newflags=0x102")

try:
    # Best effort: sys.setdlopenflags is not available on all platforms.
    sys.setdlopenflags(newflags)
except Exception:
    logRecExOnline_globalconfig.info("sys.setdlopenflags(newflags) failed with newflags=%s" % newflags)

### remember flags to set this correctly via default
from AthenaCommon.BFieldFlags import jobproperties
FirstSample = 3
NSamples = 5

### initialize flags before SetupField to choose correct condition tag database. 5 May 2017
from AthenaCommon.GlobalFlags import globalflags
from AthenaCommon.AthenaCommonFlags import jobproperties, athenaCommonFlags
# Resolve a module exposing the RTLD_* constants: prefer dl, then DLFCN,
# then ctypes — whichever succeeds is bound to the name `dl`.
try:
    import dl
except:
    try:
        import DLFCN
        dl = DLFCN
    except:
        try:
            import ctypes as dl
        except:
            raise ImportError("Failed to import dl or one of its successors")

# not compatible with python 3 + pyqt4 binaries
import six
if six.PY2:
    # NOTE(review): `sys` is used here but imported only further below —
    # presumably it is already imported earlier in the file, out of view.
    sys.setdlopenflags(
        dl.RTLD_NOW | dl.RTLD_GLOBAL if hasattr(dl, "RTLD_NOW") else dl.RTLD_GLOBAL)

# sys.argv is not present when embedding a Python interpreter, but some
# packages (i.e. numarray) seem to fall over when it is not found. So we
# inject it
if not hasattr(sys, 'argv'):
    setattr(sys, 'argv', ['meqkernel'])

# now import the rest
from Timba import dmi
from Timba import utils

import sys
import imp
import os.path
molSimplify is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with molSimplify. If not, see http://www.gnu.org/licenses/.
'''
# fix OB bug: https://github.com/openbabel/openbabel/issues/1983
# Snapshot the dlopen flags, import openbabel (which can alter them),
# then restore the snapshot.
import sys, argparse, os, platform, shutil
flags = sys.getdlopenflags()
import openbabel
sys.setdlopenflags(flags)

from .Scripts.inparse import *
from .Scripts.generator import *
from molSimplify.Classes.globalvars import *

globs = globalvars()

## Basic help description string
DescString_basic = 'Welcome to molSimplify. Only basic usage is described here.\n'
DescString_basic += 'For help on advanced modules, please refer to our documentation at molsimplify.mit.edu or provide additional commands to -h, as below:\n'
DescString_basic += '-h advanced: advanced structure generation help\n'
DescString_basic += '-h slabgen: slab builder help\n'
# DescString_basic += '-h chainb: chain builder help\n'
DescString_basic += '-h autocorr: automated correlation analysis help\n'
DescString_basic += '-h db: database search help\n'
DescString_basic += '-h inputgen: quantum chemistry code input file generation help\n'
# flake8: noqa # Store dl open flags to restore them after import import sys stored_dlopen_flags = sys.getdlopenflags() # Developer note: below is related to OpenMPI # Fix dlopen flags (may need reorganising) if "linux" in sys.platform: # FIXME: What with other platforms? try: from ctypes import RTLD_NOW, RTLD_GLOBAL except ImportError: RTLD_NOW = 2 RTLD_GLOBAL = 256 sys.setdlopenflags(RTLD_NOW | RTLD_GLOBAL) del sys # Reset dl open flags # sys.setdlopenflags(stored_dlopen_flags) # del sys # Import cpp modules from .cpp import __version__ from dolfin.common import (has_debug, has_petsc_complex, has_parmetis, git_commit_hash, TimingType, timing, timings, list_timings) import dolfin.MPI import dolfin.log
def __exit__(self, *args):
    """Context-manager exit: restore the dlopen flags saved on entry.

    Fix: the guard previously tested truthiness (``if self.saved_rtld:``),
    which skipped the restore whenever the saved flag value was 0 — a valid
    dlopen-flags value. Compare against None (the "nothing saved" sentinel)
    instead.
    """
    if self.saved_rtld is not None:
        sys.setdlopenflags(self.saved_rtld)
        # Mark the flags as restored so a second __exit__ is a no-op.
        self.saved_rtld = None
from ufl import * # Add log level present in C++ DOLFIN but not ufl.log PROGRESS = 16 # Remove some unnecessary imports from UFL del set_handler, set_level # Import names from the compiled cpp modules from . import cpp from dolfin.cpp import * from dolfin.cpp import __version__, __swigversion__, __pythonversion__ # Reset dl open flags import sys sys.setdlopenflags(dolfin.importhandler.stored_dlopen_flags) del sys # Repair namespace from ufl import FiniteElement # Import UFL color printing from ufl.log import info_red, info_green, info_blue # List of supported elements from FFC try: from ffc import supported_elements, supported_elements_for_plotting except: supported_elements = [] supported_elements_for_plotting = []
##########################################################################

__import__("GafferScene")

try:
    # Import IECoreArnold and _GafferArnold with RTLD_GLOBAL masked out of
    # the interpreter's dlopen flags. This prevents clashes between the
    # LLVM symbols in libai.so and the Mesa OpenGL driver. Ideally we
    # wouldn't use RTLD_GLOBAL anywhere - see
    # https://github.com/ImageEngine/cortex/pull/810.
    import sys
    import ctypes
    priorDlopenFlags = sys.getdlopenflags()
    sys.setdlopenflags(priorDlopenFlags & ~ctypes.RTLD_GLOBAL)
    __import__("IECoreArnold")
    from ._GafferArnold import *
finally:
    # Restore the flags and drop the temporaries from the module namespace.
    sys.setdlopenflags(priorDlopenFlags)
    del sys, ctypes, priorDlopenFlags

from .ArnoldShaderBall import ArnoldShaderBall
from .ArnoldTextureBake import ArnoldTextureBake

__import__("IECore").loadConfig("GAFFER_STARTUP_PATHS", subdirectory="GafferArnold")
# Python 2 CMS CondDB inspection script (DLFCN is a Python 2 module).
import os, sys, DLFCN
# Export plugin symbols globally before loading the CondDB bindings.
sys.setdlopenflags(DLFCN.RTLD_GLOBAL + DLFCN.RTLD_LAZY)

from pluginCondDBPyInterface import *
a = FWIncantation()
#os.putenv("CORAL_AUTH_PATH","/afs/cern.ch/cms/DB/conddb")
rdbms = RDBMS("/afs/cern.ch/cms/DB/conddb")
dbName = "oracle://cms_orcoff_prod/CMS_COND_31X_RUN_INFO"
logName = "oracle://cms_orcoff_prod/CMS_COND_31X_POPCONLOG"
rdbms.setLogger(logName)

from CondCore.Utilities import iovInspector as inspect
db = rdbms.getDB(dbName)
tags = db.allTags()

# for inspecting last run after run has started
tag = 'runinfo_start_31X_hlt'
# for inspecting last run after run has stopped
#tag = 'runsummary_test'
# NOTE(review): this chunk is truncated inside the following try block.
try:
    log = db.lastLogEntry(tag)
    #for printing all log info present into log db
    #print log.getState()
    # for inspecting all payloads/runs
    #iov = inspect.Iov(db,tag)
# NOTE(review): this chunk begins inside a conditional whose header is out
# of view; the dangling `else:` below belongs to it.
    from netCDF4 import Dataset
    netcdf_file = Dataset
else:
    from quippy.pupynere import netcdf_file

# if _quippy.so is dynamically linked with openmpi, we need to change dlopen() flags before importing it
if ('openmpi' in cfg.sections() and 'dynamic' in cfg.options['openmpi']) or \
   ('QUIP_ARCH' in os.environ and os.environ['QUIP_ARCH'].endswith('openmpi')):
    try:
        # Python 2.5 or newer
        from ctypes import RTLD_GLOBAL
    except ImportError:
        # Python 2.4
        from dl import RTLD_GLOBAL
    flags = sys.getdlopenflags()
    sys.setdlopenflags(flags | RTLD_GLOBAL)
    available_modules.append('mpi')

import _quippy

# Reference values of .true. and .false. from Fortran
QUIPPY_TRUE = _quippy.qp_reference_true()
QUIPPY_FALSE = _quippy.qp_reference_false()

from oo_fortran import FortranDerivedType, FortranDerivedTypes, FortranRoutines, fortran_class_prefix, wrap_all

# Read spec file generated by f90doc and construct wrappers for classes
# and routines found therein.
# NOTE(review): the chunk is truncated at this function header.
def quippy_cleanup():
# NOTE(review): this chunk begins inside a try/except whose `try:` header is
# out of view; the first lines below are its tail.
    dl = None
except SystemError:
    dl = None

import __helper

# set the dlopen flags so that VTK does not run into problems with
# shared symbols.
try:
    # only Python >= 2.2 has this functionality
    orig_dlopen_flags = sys.getdlopenflags()
except AttributeError:
    orig_dlopen_flags = None
if dl and (os.name == 'posix'):
    sys.setdlopenflags(dl.RTLD_NOW|dl.RTLD_GLOBAL)

# Load all required kits.
from common import *
from filtering import *
from io import *
from imaging import *
from graphics import *

# the vtk.kits variable tells us which kits we actually have
kits = ['common', 'filtering', 'io', 'imaging', 'graphics']

# Try to load optional kits. The helper function checks if the
# ImportError is actually a link error.
# NOTE(review): the chunk is truncated at this try header.
try:
## The new methods are faster in modern versions of python. ## ## Also, replace ## try: ## dictionary[key].append(item) ## except KeyError: ## dictionary[key] = [item] ## with ## dictionary.setdefault(key, []).append(item) # g++ code won't correctly resolve dynamic casts in shared libraries # unless RTLD_GLOBAL is set when the library is loaded. See # http://gcc.gnu.org/faq.html#dso. This must be done before any other # oof modules are loaded. try: sys.setdlopenflags(0x101) # RTLD_GLOBAL (0x100) | RTLD_LAZY (0x001) except AttributeError: pass # The following calls initialize elements of the C++/Python interface, # and must done before anything else that might call OOF C++ code. # threadstate.py must be imported on the main thread before it's # imported on any other thread. from ooflib.SWIG.common import threadstate # switchboard is used for communication between modules import ooflib.SWIG.common.switchboard # These can be imported in any order. from ooflib.SWIG.common import config from ooflib.SWIG.common import crandom from ooflib.SWIG.common import lock
import os
import sys
import time

# Resolve extension-module symbols lazily when importing xstream & friends.
sys.setdlopenflags(os.RTLD_LAZY)
import xstream  # noqa
import vision_type as vt  # noqa
import xproto  # noqa

# it is more like a template
# attributes can be modified while defining workflow
faster_rcnn = xstream.Method("FasterRCNNMethod").inputs(["image"])
mot = xstream.Method("MOTMethod").inputs(["face_box"]) \
    .config_file("configs/method_configs/iou_method_param.json")
merge = xstream.Method("MergeMethod") \
    .config_file("configs/method_configs/merge_head_body.json")


def body_solution(image):
    # Body-analysis workflow: FasterRCNN detection followed by MOT tracking
    # of face/head/body boxes.
    # NOTE(review): this function is truncated in this chunk (it ends
    # mid-call below).
    body_box, head_box, face_box, lmk, pose, kps = faster_rcnn(
        image,
        outputs=["body_box", "head_box", "face_box", "lmk", "pose", "kps"],
        config_file="configs/method_configs/multitask_config.json")
    face_bbox_list, face_disappeared_track_id_list = mot(
        face_box,
        outputs=["face_bbox_list", "face_disappeared_track_id_list"])
    head_bbox_list, head_disappeared_track_id_list = mot(
        head_box,
        inputs=["head_box"],
        outputs=["head_bbox_list", "head_disappeared_track_id_list"])
    body_bbox_list, body_disappeared_track_id_list = mot(
        body_box,
# ----------------------------------------------------------------
# Temporary workaround for ticket:181 (swig+python problem)
import sys

# Locate the RTLD_GLOBAL constant: try the `dl` module first, then DLFCN.
# A value of 0 means "unavailable", in which case the dlopen flags are
# left untouched.
_RTLD_GLOBAL = 0
for _name in ('dl', 'DLFCN'):
    try:
        _RTLD_GLOBAL = __import__(_name).RTLD_GLOBAL
        break
    except (ImportError, AttributeError):
        pass
del _name

if _RTLD_GLOBAL:
    _dlopenflags = sys.getdlopenflags()
    sys.setdlopenflags(_dlopenflags | _RTLD_GLOBAL)
# ----------------------------------------------------------------

# import swig generated symbols into the plzr namespace
from plzr_swig import *

# import any pure python here
from plzr import plzr
#

# ----------------------------------------------------------------
# Tail of workaround: restore the original dlopen flags.
if _RTLD_GLOBAL:
    sys.setdlopenflags(_dlopenflags)
# ----------------------------------------------------------------
#https://github.com/salilab/imp/issues/732 # this should be in the __init__.py # # How to check for openmpi? # import sys def setrc(): import mpi4py.rc mpi4py.rc.initialize = False mpi4py.rc.finalize = False if sys.platform == 'linux2': import DLFCN as dl flags = sys.getdlopenflags() sys.setdlopenflags(dl.RTLD_NOW | dl.RTLD_GLOBAL) setrc() from .pymusic import * sys.setdlopenflags(flags) else: setrc() from .pymusic import * #import DLFCN as dl #import sys #sys.setdlopenflags(dl.RTLD_NOW|dl.RTLD_GLOBAL)
# # If you're using this setting, you must verify that all the libraries # you load consistently use the same libstdc++, or you may have # mysterious segfaults. # import os as _dl_flags if not hasattr(_dl_flags, 'RTLD_GLOBAL') or not hasattr( _dl_flags, 'RTLD_LAZY'): try: # next try if DLFCN exists import DLFCN as _dl_flags # type: ignore except ImportError: # as a last attempt, use compile-time constants import torch._dl as _dl_flags # type: ignore old_flags = sys.getdlopenflags() sys.setdlopenflags(_dl_flags.RTLD_GLOBAL | _dl_flags.RTLD_LAZY) from torch._C import * sys.setdlopenflags(old_flags) del old_flags del _dl_flags else: # Easy way. You want this most of the time, because it will prevent # C++ symbols from libtorch clobbering C++ symbols from other # libraries, leading to mysterious segfaults. # # If building in an environment where libtorch_global_deps isn't available # like parts of fbsource, but where RTLD_GLOBAL causes segfaults, you will # want USE_RTLD_GLOBAL_WITH_LIBTORCH = False and USE_GLOBAL_DEPS = False # # See Note [Global dependencies]
#imports import os import sys import ctypes flags = sys.getdlopenflags() sys.setdlopenflags(flags | ctypes.RTLD_GLOBAL) import numpy as np from pyproj import Proj from datetime import datetime, timedelta from dateutil import tz import pandas import matplotlib #matplotlib.use('Agg') import matplotlib.pyplot as plt import pyselfe sys.setdlopenflags(flags) tcoon_zone = tz.gettz('UTC') utc_6 = tz.gettz('UTC-6') start_date = datetime(2000, 1, 1, 0, 0, 0, 0, utc_6) end_date = datetime(2001, 1, 1, 0, 0, 0, 0, utc_6) #start_date.replace(tzinfo=utc_6) def mk_tcoon_date(text): utc = datetime.strptime(text.strip('"'), '%m-%d-%Y %H%M') utc = utc.replace(tzinfo=tcoon_zone) return utc.astimezone(utc_6)