def upload_to_google_storage(
    input_filenames, base_url, gsutil, force, use_md5, num_threads,
    skip_hashing, gzip):
  # We only want one MD5 calculation happening at a time to avoid HD thrashing.
  md5_lock = threading.Lock()

  # Start up all the worker threads plus the printer thread.
  all_threads = []
  ret_codes = Queue.Queue()
  ret_codes.put((0, None))
  upload_queue = Queue.Queue()
  upload_timer = time.time()
  stdout_queue = Queue.Queue()
  printer_thread = PrinterThread(stdout_queue)
  printer_thread.daemon = True
  printer_thread.start()
  for thread_num in range(num_threads):
    t = threading.Thread(
        target=_upload_worker,
        args=[thread_num, upload_queue, base_url, gsutil, md5_lock, force,
              use_md5, stdout_queue, ret_codes, gzip])
    t.daemon = True
    t.start()
    all_threads.append(t)

  # We want to hash everything in a single thread since it's faster.
  # The bottleneck is in disk IO, not CPU.
  hashing_start = time.time()
  for filename in input_filenames:
    if not os.path.exists(filename):
      stdout_queue.put('Main> Error: %s not found, skipping.' % filename)
      continue
    if os.path.exists('%s.sha1' % filename) and skip_hashing:
      stdout_queue.put(
          'Main> Found hash for %s, sha1 calculation skipped.' % filename)
      with open(filename + '.sha1', 'rb') as f:
        sha1_file = f.read(1024)
      if not re.match('^([a-z0-9]{40})$', sha1_file):
        print >> sys.stderr, 'Invalid sha1 hash file %s.sha1' % filename
        return 1
      upload_queue.put((filename, sha1_file))
      continue
    stdout_queue.put('Main> Calculating hash for %s...' % filename)
    sha1_sum = get_sha1(filename)
    with open(filename + '.sha1', 'wb') as f:
      f.write(sha1_sum)
    stdout_queue.put('Main> Done calculating hash for %s.' % filename)
    upload_queue.put((filename, sha1_sum))
  hashing_duration = time.time() - hashing_start

  # Wait for everything to finish.
  for _ in all_threads:
    upload_queue.put((None, None))  # To mark the end of the work queue.
  for t in all_threads:
    t.join()
  stdout_queue.put(None)
  printer_thread.join()

  # Print timing information.
  print 'Hashing %s files took %.1f seconds' % (
      len(input_filenames), hashing_duration)
  print 'Uploading took %.1f seconds' % (time.time() - upload_timer)

  # See if we ran into any errors.
  max_ret_code = 0
  for ret_code, message in ret_codes.queue:
    max_ret_code = max(ret_code, max_ret_code)
    if message:
      print >> sys.stderr, message
  if not max_ret_code:
    print 'Success!'
  return max_ret_code
def __init__(self):
    super().__init__()
    self.url_store = {}
    self.id = 0
    self.lock = threading.Lock()
# Run the plots in different threads
thread0 = None
thread1 = None

# Create two different canvases
canvas0 = None
canvas1 = None

# Create the x array
x = plplot_py_demos.arange(NPTS)

# Lock on the gtkstate so that we don't try to plot after gtk_main_quit
GTKSTATE_CONTINUE = True
GTKSTATE_QUIT = False
gtk_state_lock = threading.Lock()
gtk_state = GTKSTATE_CONTINUE


# setup_plot - preparation for plotting an animation to a canvas
def setup_plot(canvas, title):
    # Set up the viewport and window
    canvas.vsta()
    canvas.wind(x[0], x[NPTS - 1], -2., 2.)
    # Set the pen width
    canvas.wid(2)
    # The axes should be persistent, so that they don't have to be
    # replotted every time (which would slow down the animation)
    canvas.use_persistence(True)
    os.path.join(common.THIRD_PARTY_DIR, 'gae-cloud-storage-1.9.22.1'),
    os.path.join(common.THIRD_PARTY_DIR, 'gae-mapreduce-1.9.22.0'),
    os.path.join(common.THIRD_PARTY_DIR, 'gae-pipeline-1.9.22.1'),
    os.path.join(common.THIRD_PARTY_DIR, 'graphy-1.0.0'),
    os.path.join(common.THIRD_PARTY_DIR, 'html5lib-python-1.0.1'),
    os.path.join(common.THIRD_PARTY_DIR, 'mutagen-1.42.0'),
    os.path.join(common.THIRD_PARTY_DIR, 'simplejson-3.16.0'),
    os.path.join(common.THIRD_PARTY_DIR, 'six-1.12.0'),
    os.path.join(common.THIRD_PARTY_DIR, 'soupsieve-1.9.1'),
    os.path.join(common.THIRD_PARTY_DIR, 'webencodings-0.5.1'),
]

COVERAGE_PATH = os.path.join(
    os.getcwd(), '..', 'oppia_tools', 'coverage-4.5.4', 'coverage')
TEST_RUNNER_PATH = os.path.join(os.getcwd(), 'core', 'tests', 'gae_suite.py')
LOG_LOCK = threading.Lock()
ALL_ERRORS = []
# This should be the same as core.test_utils.LOG_LINE_PREFIX.
LOG_LINE_PREFIX = 'LOG_INFO_TEST: '
_LOAD_TESTS_DIR = os.path.join(os.getcwd(), 'core', 'tests', 'load_tests')

_PARSER = argparse.ArgumentParser(description="""
Run this script from the oppia root folder:
    python -m scripts.run_backend_tests
IMPORTANT: Only one of --test_path and --test_target should be specified.
""")

_EXCLUSIVE_GROUP = _PARSER.add_mutually_exclusive_group()
_EXCLUSIVE_GROUP.add_argument(
    '--test_target',
import threading
import socket

import portCheck

# This program checks a specified port on a given IP.
found = 0
IP = 0
PORT = 0

socket.setdefaulttimeout(0.06)
print_lock = threading.Lock()


def portscan(t_IP, port):
    portCheck.found = 0
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.connect((t_IP, port))
        with print_lock:
            s.send("0101".encode('utf-8'))
            try:
                message = s.recv(100).decode('utf-8')
            except:
                s.close()
                return  # nothing received, so there is no reply to check
            if message == "0101":
                portCheck.found = 1
                portCheck.IP = t_IP
                portCheck.PORT = port
            s.close()
    except:
        pass  # connection failed: port closed or filtered
def __init__(self, value=0):
    self.v = value
    self.lock = threading.Lock()
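# The value-plus-lock pairing above is the seed of a thread-safe counter.
# A minimal sketch of how the surrounding class plausibly uses it; the
# Counter name and the increment/get methods are illustrative, not from
# the source.
import threading

class Counter:
    def __init__(self, value=0):
        self.v = value
        self.lock = threading.Lock()

    def increment(self, delta=1):
        # Every read-modify-write of self.v happens while holding the lock,
        # so concurrent increments are never lost.
        with self.lock:
            self.v += delta
            return self.v

    def get(self):
        with self.lock:
            return self.v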
def start(self, node, env):
    self.addlines(node)
    while self.lines:
        (x, y) = self.lines.pop(0)
        if x == c_preproc.POPFILE:
            self.currentnode_stack.pop()
            continue
        self.tryfind(y)


from waflib import Context, Errors
import threading
#from waflib.Tools import c_preproc

lock = threading.Lock()
g_printed_messages = {}


class cppcheck(Task):
    def scan(task):
        try:
            incn = task.generator.includes_nodes
        except AttributeError:
            raise Errors.WafError(
                '%r is missing a feature such as "c", "cxx" or "includes": '
                % task.generator)

        incn_new = []
        # For CppCheck, only probe engine files, don't track deps for 3rdParty
        for i in incn:
class Brain:
    train_queue = [[], [], [], [], []]  # s, a, r, s', s' terminal mask
    train_queue_copy = [[], [], [], [], []]  # s, a, r, s', s' terminal mask
    lock_queue = threading.Lock()

    def __init__(self):
        self.session = tf.Session()
        K.set_session(self.session)
        K.manual_variable_initialization(True)

        self.model = self._build_model()
        self.graph = self._build_graph(self.model)

        self.session.run(tf.global_variables_initializer())
        self.default_graph = tf.get_default_graph()
        self.rewards = {}
        for i in range(DAY0, DAYN):
            self.rewards[i] = 0.0
        # self.default_graph.finalize()  # avoid modifications

    def _build_model(self):
        l_input = Input(batch_shape=(None, NUM_STATE))
        l_input1 = Lambda(lambda x: x[:, 0:NUM_STATE - 7])(l_input)
        l_input2 = Lambda(lambda x: x[:, -7:])(l_input)
        l_input1 = Reshape((DEFAULT_NUM_TCLS, 1))(l_input1)
        l_Pool = AveragePooling1D(pool_size=NUM_STATE - 7)(l_input1)
        l_Pool = Reshape([1])(l_Pool)
        l_dense = Concatenate()([l_Pool, l_input2])
        l_dense = Dense(100, activation='relu')(l_dense)
        l_dense = Dropout(0.3)(l_dense)
        out_tcl_actions = Dense(NUM_ACTIONS_TCLs, activation='softmax')(l_dense)
        out_price_actions = Dense(NUM_ACTIONS_PRICES, activation='softmax')(l_dense)
        out_deficiency_actions = Dense(NUM_ACTIONS_DEF, activation='softmax')(l_dense)
        out_excess_actions = Dense(NUM_ACTIONS_EXCESS, activation='softmax')(l_dense)
        # out = Dense(NUM_ACTIONS, activation='softmax')(l_dense)
        out_value = Dense(1, activation='linear')(l_dense)
        # model = Model(inputs=l_input, outputs=[out, out_value])
        model = Model(inputs=l_input,
                      outputs=[out_tcl_actions, out_price_actions,
                               out_deficiency_actions, out_excess_actions,
                               out_value])
        model._make_predict_function()  # have to initialize before threading
        return model

    def _build_graph(self, model):
        s_t = tf.placeholder(tf.float32, shape=(None, NUM_STATE))
        a_t = tf.placeholder(tf.float32, shape=(None, NUM_ACTIONS))
        r_t = tf.placeholder(tf.float32, shape=(None, 1))  # not immediate, but discounted n step reward

        tcl_p, price_p, deficiency_p, excess_p, v = model(s_t)
        a_t_tcl, a_t_price, a_t_def, a_t_excess = tf.split(
            a_t, [NUM_ACTIONS_TCLs, NUM_ACTIONS_PRICES, NUM_ACTIONS_DEF,
                  NUM_ACTIONS_EXCESS], 1)

        log_prob_tcl = tf.math.log(
            tf.reduce_sum(input_tensor=tcl_p * a_t_tcl, axis=1, keepdims=True) + 1e-10)
        log_prob_price = tf.math.log(
            tf.reduce_sum(input_tensor=price_p * a_t_price, axis=1, keepdims=True) + 1e-10)
        log_prob_deficiency = tf.math.log(
            tf.reduce_sum(input_tensor=deficiency_p * a_t_def, axis=1, keepdims=True) + 1e-10)
        log_prob_excess = tf.math.log(
            tf.reduce_sum(input_tensor=excess_p * a_t_excess, axis=1, keepdims=True) + 1e-10)
        log_prob = log_prob_tcl + log_prob_price + log_prob_deficiency + log_prob_excess

        advantage = r_t - v

        loss_policy = -log_prob * tf.stop_gradient(advantage)  # maximize policy
        loss_value = LOSS_V * tf.square(advantage)  # minimize value error
        entropy = LOSS_ENTROPY * (
            tf.reduce_sum(input_tensor=tcl_p * tf.math.log(tcl_p + 1e-10), axis=1, keepdims=True) +
            tf.reduce_sum(input_tensor=price_p * tf.math.log(price_p + 1e-10), axis=1, keepdims=True) +
            tf.reduce_sum(input_tensor=deficiency_p * tf.math.log(deficiency_p + 1e-10), axis=1, keepdims=True) +
            tf.reduce_sum(input_tensor=excess_p * tf.math.log(excess_p + 1e-10), axis=1, keepdims=True)
        )  # maximize entropy (regularization)

        loss_total = tf.reduce_mean(loss_policy + loss_value + entropy)

        optimizer = tf.train.RMSPropOptimizer(LEARNING_RATE)
        minimize = optimizer.minimize(loss_total)

        return s_t, a_t, r_t, minimize, loss_total

    def optimize(self):
        if (len(self.train_queue_copy[0]) < TR_FREQ or
                len(self.train_queue_copy[0]) < MIN_BATCH):
            time.sleep(0)  # yield
            return

        with self.lock_queue:
            if len(self.train_queue_copy[0]) < TR_FREQ:  # more threads could have passed without lock
                return  # we can't yield inside lock

            self.train_queue = random.sample(
                np.array(self.train_queue).T.tolist(), MIN_BATCH)
            self.train_queue = np.array(self.train_queue).T.tolist()
            s, a, r, s_, s_mask = self.train_queue
            self.train_queue_copy = [[], [], [], [], []]

        s = np.vstack(s)
        a = np.vstack(a)
        r = np.vstack(r)
        s_ = np.vstack(s_)
        s_mask = np.vstack(s_mask)

        if len(s) > 5 * MIN_BATCH:
            print("Optimizer alert! Minimizing batch of %d" % len(s))

        v = self.predict_v(s_)
        r = r + GAMMA_N * v * s_mask  # set v to 0 where s_ is terminal state

        s_t, a_t, r_t, minimize, loss = self.graph
        # self.new_max()
        print("Training...")
        for _ in range(TRAINING_ITERATIONS):
            self.session.run([minimize, loss], feed_dict={s_t: s, a_t: a, r_t: r})
        print("Done...")

    # def new_max(self):
    #     length = max([len(self.rewards[i]) for i in self.rewards.keys()])
    #     # print("--------" + str(length))
    #     if length > 10:
    #         R = np.average([np.average(self.rewards[i]) for i in self.rewards.keys() if self.rewards[i] != []])
    #         print("-------- R= " + str(R))
    #         print("-------- max reward " + str(self.max_reward))
    #         if R > self.max_reward:
    #             print('new max found:')
    #             print(R)
    #             print("-------------------------------------------------------------------------------------------------")
    #             brain.model.save("A3C+++" + str() + ".h5")
    #             print("Model saved")
    #             self.max_reward = R
    #             for i in range(0, DAYN - DAY0):
    #                 self.rewards[i] = []

    def train_push(self, s, a, r, s_):
        with self.lock_queue:
            self.train_queue[0].append(s)
            self.train_queue[1].append(a)
            self.train_queue[2].append(r)
            self.train_queue_copy[0].append(s)
            self.train_queue_copy[1].append(a)
            self.train_queue_copy[2].append(r)

            if s_ is None:
                self.train_queue[3].append(NONE_STATE)
                self.train_queue[4].append(0.)
                self.train_queue_copy[3].append(NONE_STATE)
                self.train_queue_copy[4].append(0.)
            else:
                self.train_queue[3].append(s_)
                self.train_queue[4].append(1.)
                self.train_queue_copy[3].append(s_)
                self.train_queue_copy[4].append(1.)

    def predict(self, s):
        with self.default_graph.as_default():
            tcl_p, price_p, deficiency_p, excess_p, v = self.model.predict(s)
            return [tcl_p, price_p, deficiency_p, excess_p], v

    def predict_p(self, s):
        with self.default_graph.as_default():
            tcl_p, price_p, deficiency_p, excess_p, v = self.model.predict(s)
            return [tcl_p[0], price_p[0], deficiency_p[0], excess_p[0]]

    def predict_v(self, s):
        with self.default_graph.as_default():
            tcl_p, price_p, deficiency_p, excess_p, v = self.model.predict(s)
            return v

    def predict_p_vote(self, s):
        # Boost learning: several versions of the successful models vote for the best action
        votes = []
        for filename in os.listdir(MODELS_DIRECTORY):
            if filename.endswith(".h5"):
                with self.default_graph.as_default():
                    try:
                        self.model.load_weights(MODELS_DIRECTORY + "/" + filename)
                        tcl_p, price_p, deficiency_p, excess_p, v = self.model.predict(s)
                        votes.append([np.argmax(tcl_p), np.argmax(price_p),
                                      np.argmax(deficiency_p), np.argmax(excess_p)])
                    except:
                        print(filename + " didn't vote!")
        boosted_p = np.average(np.array(votes), axis=0)
        return np.rint(boosted_p).astype(int)
def __init__(self, *args, **kwargs):
    super(Note, self).__init__(*args, **kwargs)
    # Regression for #13227 -- having an attribute that
    # is unpickleable doesn't stop you from cloning queries
    # that use objects of that type as an argument.
    self.lock = threading.Lock()
# Pebble is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.

# You should have received a copy of the GNU Lesser General Public License
# along with Pebble. If not, see <http://www.gnu.org/licenses/>.

import signal
import threading

from functools import wraps


_synchronized_lock = threading.Lock()


def synchronized(*args):
    """A synchronized function prevents two or more callers from
    interleaving its execution, preventing race conditions.

    The synchronized decorator accepts as an optional parameter a Lock,
    RLock or Semaphore object which will be employed to ensure the
    function's atomicity.

    If no synchronization object is given, a single threading.Lock will be
    used. This implies that between different decorated functions, only one
    at a time will be executed.

    """
    if callable(args[0]):
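# A minimal usage sketch of the decorator documented above, assuming the
# complete `synchronized` implementation from the Pebble package: bare
# @synchronized callers all share the module-level lock, while passing an
# explicit lock serializes only the functions that share it.
import threading
from pebble import synchronized

@synchronized  # uses Pebble's single shared lock
def transfer(ledger, src, dst, amount):
    # Both mutations happen atomically with respect to other bare-decorated
    # callers, so no thread observes money in flight.
    ledger[src] -= amount
    ledger[dst] += amount

account_lock = threading.Lock()

@synchronized(account_lock)  # serializes only against users of this lock
def audit(ledger):
    return sum(ledger.values())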
class LoopRunner(object):
    """
    A helper to start and stop an IO loop in a controlled way.
    Several loop runners can associate safely to the same IO loop.

    Parameters
    ----------
    loop: IOLoop (optional)
        If given, this loop will be re-used, otherwise an appropriate one
        will be looked up or created.
    asynchronous: boolean (optional, default False)
        If false (the default), the loop is meant to run in a separate
        thread and will be started if necessary.
        If true, the loop is meant to run in the thread this
        object is instantiated from, and will not be started automatically.
    """
    # All loops currently associated to loop runners
    _all_loops = weakref.WeakKeyDictionary()
    _lock = threading.Lock()

    def __init__(self, loop=None, asynchronous=False):
        current = IOLoop.current()
        if loop is None:
            if asynchronous:
                self._loop = current
            else:
                # We're expecting the loop to run in another thread,
                # avoid re-using this thread's assigned loop
                self._loop = IOLoop()
            self._should_close_loop = True
        else:
            self._loop = loop
            self._should_close_loop = False
        self._asynchronous = asynchronous
        self._loop_thread = None
        self._started = False
        with self._lock:
            self._all_loops.setdefault(self._loop, (0, None))

    def start(self):
        """
        Start the IO loop if required.  The loop is run in a dedicated
        thread.

        If the loop is already running, this method does nothing.
        """
        with self._lock:
            self._start_unlocked()

    def _start_unlocked(self):
        assert not self._started

        count, real_runner = self._all_loops[self._loop]
        if self._asynchronous or real_runner is not None or count > 0:
            self._all_loops[self._loop] = count + 1, real_runner
            self._started = True
            return

        assert self._loop_thread is None
        assert count == 0

        loop_evt = threading.Event()
        done_evt = threading.Event()
        in_thread = [None]
        start_exc = [None]

        def loop_cb():
            in_thread[0] = threading.current_thread()
            loop_evt.set()

        def run_loop(loop=self._loop):
            loop.add_callback(loop_cb)
            try:
                loop.start()
            except Exception as e:
                start_exc[0] = e
            finally:
                done_evt.set()

        thread = threading.Thread(target=run_loop, name="IO loop")
        thread.daemon = True
        thread.start()

        loop_evt.wait(timeout=1000)
        self._started = True

        actual_thread = in_thread[0]
        if actual_thread is not thread:
            # Loop already running in other thread (user-launched)
            done_evt.wait(5)
            if not isinstance(start_exc[0], RuntimeError):
                raise start_exc[0]
            self._all_loops[self._loop] = count + 1, None
        else:
            assert start_exc[0] is None, start_exc
            self._loop_thread = thread
            self._all_loops[self._loop] = count + 1, self

    def stop(self, timeout=10):
        """
        Stop and close the loop if it was created by us.
        Otherwise, just mark this object "stopped".
        """
        with self._lock:
            self._stop_unlocked(timeout)

    def _stop_unlocked(self, timeout):
        if not self._started:
            return

        self._started = False

        count, real_runner = self._all_loops[self._loop]
        if count > 1:
            self._all_loops[self._loop] = count - 1, real_runner
        else:
            assert count == 1
            del self._all_loops[self._loop]
            if real_runner is not None:
                real_runner._real_stop(timeout)

    def _real_stop(self, timeout):
        assert self._loop_thread is not None
        if self._loop_thread is not None:
            try:
                self._loop.add_callback(self._loop.stop)
                self._loop_thread.join(timeout=timeout)
                self._loop.close()
            finally:
                self._loop_thread = None

    def is_started(self):
        """
        Return True between start() and stop() calls, False otherwise.
        """
        return self._started

    def run_sync(self, func, *args, **kwargs):
        """
        Convenience helper: start the loop if needed,
        run sync(func, *args, **kwargs), then stop the loop again.
        """
        if self._started:
            return sync(self.loop, func, *args, **kwargs)
        else:
            self.start()
            try:
                return sync(self.loop, func, *args, **kwargs)
            finally:
                self.stop()

    @property
    def loop(self):
        return self._loop
import socket
import threading
import socketserver
import json
import struct

import util

queue_lock = threading.Lock()
msg_q = []


class ThreadedTCPRequestHandler(socketserver.BaseRequestHandler):

    def setup(self):
        self.q_pos = 0
        self.type = None
        print("{} :: {} connected".format(
            threading.current_thread().getName(), self.client_address))

    def handle(self):
        while 1:
            self.to_queue()
            print("{} q_pos {} len queue {}".format(
                threading.current_thread().getName(), self.q_pos, len(msg_q)))

    def finish(self):
        print("{} disconnected".format(self.client_address))

    def to_queue(self):
        s = str(util.recv_msg(self.request), 'utf-8')
# Python imports
import time, sys, threading, signal, ipaddress

# Third-party packages
import requests
from lxml import etree

# Our configuration file
sys.path.append('/etc/roskom')
import config

# Script start time
execution_start = time.time()

# Set up the mutexes
in_mutex = threading.Lock()
out_mutex = threading.Lock()

# Pretend to be a browser
request_headers = {
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/49.0.2623.108 Chrome/49.0.2623.108 Safari/537.36',
}

# Counter of processed links (to display progress)
counter = 0


# Our worker
class Worker(threading.Thread):
    def __init__(self, thread_id, in_data, out_data, trace):
        threading.Thread.__init__(self)
        self.thread_id = thread_id
def __init__(self):
    self.condition = threading.Condition(threading.Lock())
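# Wrapping an explicit Lock in a Condition gives waiters and notifiers one
# shared mutex. A minimal producer/consumer sketch of how such a condition is
# typically used; the Mailbox class and its method names are illustrative,
# not from the source.
import threading

class Mailbox:
    def __init__(self):
        self.condition = threading.Condition(threading.Lock())
        self.items = []

    def put(self, item):
        with self.condition:           # acquires the underlying Lock
            self.items.append(item)
            self.condition.notify()    # wakes one waiting consumer

    def take(self):
        with self.condition:
            while not self.items:      # loop guards against spurious wakeups
                self.condition.wait()  # releases the Lock while sleeping
            return self.items.pop(0)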
def __init__(self, config_dict):
    threading.Thread.__init__(self, name="PurpleAirMonitor")
    self.config_dict = config_dict
    self._lock = threading.Lock()
    self._record = None
    self.running = False
# The full license information can be found in LICENSE.txt
# in the root directory of this project.

import logging
from six.moves import queue as Queue
import sys
import threading
import time

from axon.client.traffic_elements import TrafficRule, \
    Endpoint, Port, Protocol, Action, Connected

_lock_debug = False  # Default to False to avoid huge log files
global_lock = threading.Lock()
# Not currently used, but left for examples.
static_lock = threading.Lock()
store_func_exception_args = False

log = logging.getLogger('utilities')


def truncate_str(tr_str, num_chars, terminator=None):
    """
    Truncates tr_str at num_chars and appends a terminator message.
    If num_chars == 0, return the original string.
    """
    default_terminator = "<TRUNCATED>"
    if terminator is None:
        terminator = default_terminator
    if num_chars == 0 or len(tr_str) <= num_chars:
        return tr_str
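# Hedged examples of truncate_str's documented behavior. The truncating
# branch itself is cut off in the excerpt above, so the last two results are
# inferred from the docstring rather than from visible code.
truncate_str("short", 100)            # under the limit: returned unchanged
truncate_str("short", 0)              # num_chars == 0: returned unchanged
truncate_str("abcdefghij", 5)         # presumably "abcde<TRUNCATED>"
truncate_str("abcdefghij", 5, "...")  # presumably "abcde..."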
class Protocols(object):
    """Collection of protocol configurations.

    Used to describe a complete set of content-type mappings for multiple
    protocol configurations.

    Properties:
      names: Sorted list of the names of registered protocols.
      content_types: Sorted list of supported content-types.
    """

    __default_protocols = None
    __lock = threading.Lock()

    def __init__(self):
        """Constructor."""
        self.__by_name = {}
        self.__by_content_type = {}

    def add_protocol_config(self, config):
        """Add a protocol configuration to protocol mapping.

        Args:
          config: A ProtocolConfig.

        Raises:
          ServiceConfigurationError if protocol.name is already registered
            or any of its content-types are already registered.
        """
        if config.name in self.__by_name:
            raise ServiceConfigurationError(
                'Protocol name %r is already in use' % config.name)
        for content_type in config.content_types:
            if content_type in self.__by_content_type:
                raise ServiceConfigurationError(
                    'Content type %r is already in use' % content_type)

        self.__by_name[config.name] = config
        self.__by_content_type.update(
            (t, config) for t in config.content_types)

    def add_protocol(self, *args, **kwargs):
        """Add a protocol configuration from basic parameters.

        Simple helper method that creates and registers a ProtocolConfig
        instance.
        """
        self.add_protocol_config(ProtocolConfig(*args, **kwargs))

    @property
    def names(self):
        return tuple(sorted(self.__by_name))

    @property
    def content_types(self):
        return tuple(sorted(self.__by_content_type))

    def lookup_by_name(self, name):
        """Look up a ProtocolConfig by name.

        Args:
          name: Name of protocol to look for.

        Returns:
          ProtocolConfig associated with name.

        Raises:
          KeyError if there is no protocol for name.
        """
        return self.__by_name[name.lower()]

    def lookup_by_content_type(self, content_type):
        """Look up a ProtocolConfig by content-type.

        Args:
          content_type: Content-type to find protocol configuration for.

        Returns:
          ProtocolConfig associated with content-type.

        Raises:
          KeyError if there is no protocol for content-type.
        """
        return self.__by_content_type[content_type.lower()]

    @classmethod
    def new_default(cls):
        """Create default protocols configuration.

        Returns:
          New Protocols instance configured for protobuf and protorpc.
        """
        protocols = cls()
        protocols.add_protocol(protobuf, 'protobuf')
        protocols.add_protocol(protojson.ProtoJson.get_default(), 'protojson')
        return protocols

    @classmethod
    def get_default(cls):
        """Get the global default Protocols instance.

        Returns:
          Current global default Protocols instance.
        """
        default_protocols = cls.__default_protocols
        if default_protocols is None:
            with cls.__lock:
                default_protocols = cls.__default_protocols
                if default_protocols is None:
                    default_protocols = cls.new_default()
                    cls.__default_protocols = default_protocols
        return default_protocols

    @classmethod
    def set_default(cls, protocols):
        """Set the global default Protocols instance.

        Args:
          protocols: A Protocols instance.

        Raises:
          TypeError: If protocols is not an instance of Protocols.
        """
        if not isinstance(protocols, Protocols):
            raise TypeError(
                'Expected value of type "Protocols", found %r' % protocols)
        with cls.__lock:
            cls.__default_protocols = protocols
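# get_default() above is the classic double-checked locking idiom: the first
# `is None` test skips the lock on the hot path, and the re-check inside the
# lock stops two racing threads from both building a default. The same shape
# works for any lazily created singleton; a generic sketch (the Registry name
# is illustrative):
import threading

class Registry:
    _default = None
    _lock = threading.Lock()

    @classmethod
    def get_default(cls):
        # Fast path: no lock once the singleton exists.
        if cls._default is None:
            with cls._lock:
                # Re-check under the lock: another thread may have won the
                # race between our first check and acquire().
                if cls._default is None:
                    cls._default = cls()
        return cls._default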
def __init__(self, count=4, daemon=True):
    self.count = count
    self.daemon = daemon
    self._lock = threading.Lock()
    self.initialize()
class Environment:
    _env_cache = []
    _env_lock = threading.Lock()

    def __init__(self, problem, repeat_steps=1, gamma=0.99, run_name=None):
        self.problem = problem
        self.repeat_steps = repeat_steps
        self.gamma = gamma

        env = gym.make(problem)
        env.seed(random.randint(1, 999))
        self.state_size = env.observation_space.shape[0]
        if type(env.action_space) == gym.spaces.discrete.Discrete:
            self.action_size = env.action_space.n
            self.action_continuous = False
        elif type(env.action_space) == gym.spaces.box.Box:
            self.action_size = env.action_space.shape[0]
            self.action_continuous = True
            if not ((env.action_space.high == 1.).all()
                    and (env.action_space.low == -1.).all()):
                warnings.warn('Expecting action range (-1,1), found: %s - %s'
                              % (env.action_space.low, env.action_space.high))
        else:
            raise Exception('Unknown action space type')
        self.timestep_limit = env.spec.timestep_limit
        # How much reward we assume we keep getting per step
        # if we get cut off by timestep_limit
        self.cutoff_reward = self._get_cutoff_reward(env) * repeat_steps
        self._env_cache.append(env)
        self._last_env = None

        self.episode = Counter()
        self.episode_rewards = []
        self.total_steps = Counter()
        self.start_time = time.time()
        self.agent_parameters = None

        self.run_name = None
        self.log = None
        if run_name is not None and LOG_DIR is not None:
            log_path = find_new_path(LOG_DIR + '/' + run_name)
            self.run_name = log_path.split('/')[-1]
            self.log = tf.summary.FileWriter(log_path)
            self.log.close()

    def _get_cutoff_reward(self, env):
        if env.spec.id.startswith("CartPole-"):
            return 1
        if env.spec.id.startswith("MountainCar-"):
            return -1
        return 0

    def close(self):
        if self.log:
            self.log.close()

    def log_summary(self, summary_file):
        if summary_file is not None and self.episode.val() > 0:
            elapsed = time.time() - self.start_time
            with open(summary_file, "a") as f:
                summary = {
                    'problem': self.problem,
                    'run_name': self.run_name,
                    'episodes': self.episode.val(),
                    'steps': self.total_steps.val(),
                    'sum_reward': np.sum(self.episode_rewards),
                    'avg_reward': np.mean(self.episode_rewards),
                    'avg_reward_last100': np.mean(self.episode_rewards[-100:]),
                    'elapsed': elapsed,
                    'fps': self.total_steps.val() / (elapsed + 0.000001),
                    'repeat_steps': self.repeat_steps,
                    **self.agent_parameters
                }
                print(json.dumps(summary), file=f)

    def _get_available_env(self):
        with self._env_lock:
            if self._env_cache:
                env = self._env_cache.pop()
            else:
                env = gym.make(self.problem)
            self._last_env = env
            return env

    def _return_available_env(self, env):
        with self._env_lock:
            self._env_cache.append(env)

    def _act_random(self, state):
        return (random.randint(0, self.action_size - 1), 0, None)

    def run(self, agent,
            train=True,            # If false, will not train brain (but still gather DQN experience)
            explore=True,          # If true, will use epsilon-exploration, if false will be greedy
            random=False,          # If true, will act random, not according to brain
            log_tensorboard=False,
            log_print=True,
            render=False,          # If true, will render
            render_delay=0         # If rendering, will put delay
            ):
        episode = self.episode.inc()
        step = 0
        metrics = self.EpisodeMetrics(self, agent, episode, self.gamma)
        if self.agent_parameters is None:
            self.agent_parameters = agent.get_parameters()

        env = self._get_available_env()
        state = env.reset()
        agent.episode_start(train)

        done = False
        while not done:
            step += 1
            global_step = self.total_steps.inc()

            [action, value, eps, info] = (
                agent.act(state, global_step, explore)
                if not random else self._act_random(state))

            reward = 0
            for _ in range(self.repeat_steps):
                next_state, r, done, _ = env.step(action)
                reward += r
                if render:
                    env.render()
                    time.sleep(render_delay)
                if done:
                    break

            reward_plus = reward
            if done:
                if step < self.timestep_limit / self.repeat_steps:
                    # Actual game-over
                    next_state = None
                else:
                    # Episode interrupted because of time - don't treat it as
                    # final state in Q-learning.  reward_plus is with all
                    # expected future-rewards if we keep running
                    reward_plus += self.cutoff_reward * self.gamma / (1 - self.gamma)

            agent.observe([state, action, reward, next_state, info], train, done)
            metrics.observe_step(step, done, reward, reward_plus, value, eps)
            state = next_state

        self._return_available_env(env)
        self.episode_rewards.append(metrics.total_reward)
        metrics.log_episode_finish(self.log, step, next_state is not None,
                                   explore, log_tensorboard, log_print)

    class EpisodeMetrics:
        def __init__(self, env, agent, episode, gamma):
            self.env = env
            self.agent = agent
            self.episode = episode
            self.gamma = gamma
            self.total_reward = 0
            self.Qs = []
            self.rewards = []
            self.epsilons = []
            self.start_time = time.time()
            self.start_steps = env.total_steps.val()

        def observe_step(self, step, done, reward, reward_plus, Q, eps):
            self.total_reward += reward
            self.Qs.append(Q)
            self.rewards.append(reward_plus)
            self.epsilons.append(eps)

        def log_episode_finish(self, log, steps, is_timestep_limit, explore,
                               log_tensorboard, log_print):
            elapsed = time.time() - self.start_time
            agent = self.agent
            env = self.env

            (first_Q, last_Q) = (self.Qs[0], self.Qs[-1])
            avg_Q = np.average(self.Qs)

            n = len(self.rewards)
            cum_rewards = np.zeros(n)
            r = 0
            for i in range(n - 1, -1, -1):
                r = r * self.gamma + self.rewards[i]
                cum_rewards[i] = r
            dQ = cum_rewards - self.Qs
            (first_dQ, last_dQ) = (dQ[0], dQ[-1])
            rms_dQ = np.sqrt(np.average(np.square(dQ)))

            total_steps = env.total_steps.val()
            steps_diff = total_steps - self.start_steps
            fps = steps_diff / (elapsed + 0.000001)

            if log_print:
                print("{:4.0f} /{:7.0f} :: time_limit={} reward1={:3.0f}, "
                      "reward10={:3.0f}, reward100={:3.0f}, eps={:.3f}, "
                      "fps={:4.0f}".format(
                          self.episode, total_steps, is_timestep_limit,
                          self.total_reward,
                          np.mean(self.env.episode_rewards[-10:]),
                          np.mean(self.env.episode_rewards[-100:]),
                          np.mean(self.epsilons), fps))

            if log_tensorboard and log is not None:
                log.reopen()
                prefix = 'metrics/' if explore else 'metrics_test/'
                # first row
                log_metric(log, prefix + '_Reward1', self.total_reward, total_steps)
                log_metric(log, prefix + '_Reward10',
                           np.mean(self.env.episode_rewards[-10:]), total_steps)
                log_metric(log, prefix + '_epsilon',
                           np.mean(self.epsilons), total_steps)
                # second row
                log_metric(log, prefix + 'Q_avg', avg_Q, total_steps)
                log_metric(log, prefix + 'Q_first', first_Q, total_steps)
                if steps < self.env.timestep_limit:
                    log_metric(log, prefix + 'Q_last', last_Q, total_steps)
                # third row
                log_metric(log, prefix + 'dQ_arms', rms_dQ, total_steps)
                log_metric(log, prefix + 'dQ_first', first_dQ, total_steps)
                log_metric(log, prefix + 'dQ_last', last_dQ, total_steps)

                log_metric(log, 'monitor/z_episodes', self.episode, total_steps)
                log_metric(log, 'monitor/z_fps', fps, total_steps)

                if self.episode == 1:
                    for (p, v) in agent.get_parameters().items():
                        log_metric(log, 'params/' + p, v)
def __init__(self, model, got_focus, max_size=None):
    urwid.ListBox.__init__(self, model)
    self._got_focus = got_focus
    self.max_size = max_size
    self._lock = threading.Lock()
if 'INTAKE_ESM_CONFIG' in os.environ:
    PATH = os.environ['INTAKE_ESM_CONFIG']
    paths.append(PATH)
elif os.path.exists(os.path.join(os.getcwd(), '.intake_esm')):
    PATH = os.path.join(os.getcwd(), '.intake_esm')
    paths.append(PATH)
else:
    PATH = os.path.join(os.path.expanduser('~'), '.intake_esm')

global_config = config = {}

config_lock = threading.Lock()

defaults = []


def update(old, new, priority='new'):
    """ Update a nested dictionary with values from another

    This is like dict.update except that it smoothly merges nested values

    This operates in-place and modifies old

    Parameters
    ----------
    priority: string {'old', 'new'}
        If new (default) then the new dictionary has preference.
        Otherwise the old dictionary does.
    """
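# The body of update() is cut off above, but the docstring pins down its
# semantics. A minimal sketch of a merge with those properties (an
# illustration, not the library's actual implementation):
def merge(old, new, priority='new'):
    # Recursively fold `new` into `old` in place.
    for key, value in new.items():
        if key in old and isinstance(old[key], dict) and isinstance(value, dict):
            merge(old[key], value, priority=priority)
        elif priority == 'new' or key not in old:
            old[key] = value
    return old

cfg = {'database': {'host': 'localhost', 'port': 5432}}
merge(cfg, {'database': {'port': 6543}, 'cache': True})
# cfg == {'database': {'host': 'localhost', 'port': 6543}, 'cache': True}
merge(cfg, {'database': {'port': 9999}}, priority='old')
# 'old' wins on conflict, so port stays 6543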
class Log():
    """ Log file writer class """
    strDirName = ''
    strDate = ''
    iLevel = LogLevel.ERROR | LogLevel.SYSTEM
    bOpen = False
    iIndex = 1
    iLogSize = 0
    iMaxLogSize = 10000000
    bWindow = False
    clsMutex = threading.Lock()

    @classmethod
    def SetDirectory(cls, strDirName):
        """ Set the folder where log files will be stored.

        Args:
            strDirName (string): path of the folder to store log files in
        """
        if os.path.isdir(strDirName) == False:
            os.mkdir(strDirName)
        cls.strDirName = strDirName
        if platform.system() == 'Windows':
            cls.bWindow = True

    @classmethod
    def Print(cls, eLevel, strText):
        """ Write a log message.

        Args:
            eLevel (LogLevel): log level
            strText (string): log message
        """
        if (cls.iLevel & eLevel) == 0:
            return
        if eLevel == LogLevel.ERROR:
            strHeader = "[ERROR] "
        elif eLevel == LogLevel.INFO:
            strHeader = "[INFO] "
        elif eLevel == LogLevel.DEBUG:
            strHeader = "[DEBUG] "
        elif eLevel == LogLevel.NETWORK:
            strHeader = "[NETWORK] "
        elif eLevel == LogLevel.SYSTEM:
            strHeader = "[SYSTEM] "
        clsTime = datetime.now()
        strDate = clsTime.strftime("%Y%m%d")
        strTime = clsTime.strftime("%H:%M:%S.%f")
        strLog = "[" + strTime + "] " + strHeader + "[" + str(threading.get_ident()) + "] " + strText
        if len(cls.strDirName) > 0:
            if cls.bWindow:
                strLog += "\r\n"
            else:
                strLog += "\n"
            bOpen = False
            cls.clsMutex.acquire()
            if cls.strDate != strDate:
                cls.strDate = strDate  # remember the day, so rotation triggers once per date change
                cls.iIndex = 1
                bOpen = True
            elif cls.iLogSize > cls.iMaxLogSize:
                cls.iIndex += 1
                bOpen = True
            if bOpen:
                if cls.bOpen:
                    cls.fd.close()
                    cls.bOpen = False
                cls.iLogSize = 0
                strFileName = cls.strDirName + "/" + strDate + "_" + str(cls.iIndex) + ".txt"
                cls.fd = open(strFileName, "ab")
                cls.bOpen = True
            arrBuf = strLog.encode()
            cls.iLogSize += len(arrBuf)  # accumulate, so iMaxLogSize-based rotation can fire
            cls.fd.write(arrBuf)
            cls.fd.flush()
            cls.clsMutex.release()
        else:
            print(strLog)

    @classmethod
    def SetLevel(cls, iLevel):
        """ Set the log level.

        Args:
            iLevel (LogLevel): log level (e.g. LogLevel.DEBUG | LogLevel.INFO)
        """
        cls.iLevel = LogLevel.ERROR | LogLevel.SYSTEM
        cls.iLevel |= iLevel
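# A short usage sketch of the Log class above; the directory path is
# illustrative.
Log.SetDirectory("./logs")                    # daily files under ./logs
Log.SetLevel(LogLevel.DEBUG | LogLevel.INFO)  # ERROR and SYSTEM stay on
Log.Print(LogLevel.INFO, "service started")
Log.Print(LogLevel.DEBUG, "loaded 42 rules")
# With no directory configured, messages fall back to print():
# Log.Print(LogLevel.ERROR, "this would go to stdout")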
class Brain:
    train_queue = [[], [], [], [], []]  # s, a, r, s', s' terminal mask
    lock_queue = threading.Lock()

    def __init__(self):
        self.session = tf.Session()
        K.set_session(self.session)
        K.manual_variable_initialization(True)

        self.model = self._build_model()
        if loader_toggle:
            self.model.load_weights('model_saved.h5f')
            print('Weights loaded')
        self.graph = self._build_graph(self.model)

        self.session.run(tf.global_variables_initializer())
        print(self.model.get_weights())
        self.default_graph = tf.get_default_graph()

        self.default_graph.finalize()  # comment out to allow for saving of model and weights

    def _build_model(self):
        l_input = Input(batch_shape=(None, NUM_STATE))
        l_dense = Dense(16, activation='relu')(l_input)

        out_actions = Dense(NUM_ACTIONS, activation='softmax')(l_dense)
        out_value = Dense(1, activation='linear')(l_dense)

        if loader_toggle:
            model = load_model('model_saved.h5f')
            print('Saved model has been loaded')
        else:
            model = Model(inputs=[l_input], outputs=[out_actions, out_value])
        model._make_predict_function()  # have to initialize before threading

        return model

    def _build_graph(self, model):
        s_t = tf.placeholder(tf.float32, shape=(None, NUM_STATE))
        a_t = tf.placeholder(tf.float32, shape=(None, NUM_ACTIONS))
        r_t = tf.placeholder(tf.float32, shape=(None, 1))  # not immediate, but discounted n step reward

        p, v = model(s_t)

        log_prob = tf.log(tf.reduce_sum(p * a_t, axis=1, keep_dims=True) + 1e-10)
        advantage = r_t - v

        loss_policy = -log_prob * tf.stop_gradient(advantage)  # maximize policy
        loss_value = LOSS_V * tf.square(advantage)  # minimize value error
        entropy = LOSS_ENTROPY * tf.reduce_sum(
            p * tf.log(p + 1e-10), axis=1, keep_dims=True)  # maximize entropy (regularization)

        loss_total = tf.reduce_mean(loss_policy + loss_value + entropy)

        # optimizer = tf.train.GradientDescentOptimizer(LEARNING_RATE)
        optimizer = tf.train.AdamOptimizer(LEARNING_RATE)
        # optimizer = tf.train.RMSPropOptimizer(LEARNING_RATE, decay=.99)
        minimize = optimizer.minimize(loss_total)
        # self.saver = tf.train.Saver([s_t, a_t, r_t, minimize])
        return s_t, a_t, r_t, minimize

    def optimize(self):
        if len(self.train_queue[0]) < MIN_BATCH:
            time.sleep(0)  # yield
            return

        with self.lock_queue:
            if len(self.train_queue[0]) < MIN_BATCH:  # more threads could have passed without lock
                return  # we can't yield inside lock

            s, a, r, s_, s_mask = self.train_queue
            self.train_queue = [[], [], [], [], []]

        s = np.vstack(s)
        a = np.vstack(a)
        r = np.vstack(r)
        s_ = np.vstack(s_)
        s_mask = np.vstack(s_mask)

        if len(s) > 5 * MIN_BATCH:
            print("Optimizer alert! Minimizing batch of %d" % len(s))

        v = self.predict_v(s_)
        r = r + GAMMA_N * v * s_mask  # set v to 0 where s_ is terminal state

        s_t, a_t, r_t, minimize = self.graph
        self.session.run(minimize, feed_dict={s_t: s, a_t: a, r_t: r})

    def train_push(self, s, a, r, s_):
        with self.lock_queue:
            self.train_queue[0].append(s)
            self.train_queue[1].append(a)
            self.train_queue[2].append(r)

            if s_ is None:
                self.train_queue[3].append(NONE_STATE)
                self.train_queue[4].append(0.)
            else:
                self.train_queue[3].append(s_)
                self.train_queue[4].append(1.)
    def predict(self, s):
        with self.default_graph.as_default():
            p, v = self.model.predict(s)
            return p, v

    def predict_p(self, s):
        with self.default_graph.as_default():
            p, v = self.model.predict(s)
            return p

    def predict_v(self, s):
        with self.default_graph.as_default():
            p, v = self.model.predict(s)
            return v

    def saving(self):
        if saver_toggle:
            self.model.save_weights('test_file.h5f')
            tf.keras.models.save_model(
                self.model,
                'model_saved.h5f',
                overwrite=True,
                include_optimizer=True
            )
            # self.model.save('model_saved.h5f', overwrite=True, include_optimizer=True)
            # self.saver.save(tf.Session(), 'my-model', global_step=999)
            print("Model Saved...")
def __init__(self):
    self.queue = list()
    self.index = 0
    self.lock = threading.Lock()
def test_failed_request2(self):
    """
    A request is "failed" if it throws an exception while executing.
    The exception should be forwarded to ALL waiting requests,
    which should re-raise it.
    """

    class CustomRuntimeError(RuntimeError):
        pass

    def impossible_workload():
        time.sleep(0.2)
        raise CustomRuntimeError("Intentional exception.")

    impossible_req = Request(impossible_workload)

    def wait_for_impossible():
        # This request will fail...
        impossible_req.wait()

        # Since there are some exception guards in the code we're testing,
        # spit something out to stderr just to be sure this error
        # isn't getting swallowed accidentally.
        sys.stderr.write("ERROR: Shouldn't get here.")
        assert False, "Shouldn't get here."

    req1 = Request(wait_for_impossible)
    req2 = Request(wait_for_impossible)

    failed_ids = []
    lock = threading.Lock()

    def handle_failed_req(req_id, failure_exc, exc_info):
        assert isinstance(failure_exc, CustomRuntimeError)
        with lock:
            failed_ids.append(req_id)

    req1.notify_failed(partial(handle_failed_req, 1))
    req2.notify_failed(partial(handle_failed_req, 2))

    try:
        req1.submit()
    except:
        # submit may fail here if in single-threaded debug mode.
        assert Request.global_thread_pool.num_workers == 0

    try:
        req2.submit()
    except:
        # submit may fail here if in single-threaded debug mode.
        assert Request.global_thread_pool.num_workers == 0

    try:
        req1.wait()
    except RuntimeError:
        pass
    else:
        # wait() should have raised unless we're in single-threaded debug mode.
        if Request.global_thread_pool.num_workers > 0:
            assert False, "Expected an exception from that request, but didn't get it."

    try:
        req2.wait()
    except RuntimeError:
        pass
    else:
        # wait() should have raised unless we're in single-threaded debug mode.
        if Request.global_thread_pool.num_workers > 0:
            assert False, "Expected an exception from that request, but didn't get it."

    assert 1 in failed_ids
    assert 2 in failed_ids
def __init__(self, args: list, **kwargs):
    parser = argparse.ArgumentParser()
    self.add_arguments(parser=parser)
    self.arguments = parser.parse_args(args)

    # Configure connection to the chain
    provider = HTTPProvider(endpoint_uri=self.arguments.rpc_host,
                            request_kwargs={'timeout': self.arguments.rpc_timeout})
    self.web3: Web3 = kwargs['web3'] if 'web3' in kwargs else Web3(provider)
    self.web3.eth.defaultAccount = self.arguments.eth_from
    register_keys(self.web3, self.arguments.eth_key)
    self.our_address = Address(self.arguments.eth_from)

    # Check configuration for retrieving urns/bites
    if self.arguments.type == 'flip' and self.arguments.create_auctions \
            and self.arguments.from_block is None \
            and self.arguments.vulcanize_endpoint is None:
        raise RuntimeError("Either --from-block or --vulcanize-endpoint must be specified "
                           "to kick off flip auctions")
    if self.arguments.type == 'flip' and not self.arguments.ilk:
        raise RuntimeError("--ilk must be supplied when configuring a flip keeper")
    if self.arguments.type == 'flop' and self.arguments.create_auctions \
            and self.arguments.from_block is None:
        raise RuntimeError("--from-block must be specified to kick off flop auctions")

    self.addresses_path = kwargs["addresses_path"] if "addresses_path" in kwargs \
        else self.arguments.addresses_path

    # Configure core and token contracts
    if self.addresses_path is not None:
        self.mcd = DssDeployment.from_json(web3=self.web3,
                                           conf=open(self.addresses_path, "r").read())
    else:
        self.mcd = DssDeployment.from_node(web3=self.web3)
    self.vat = self.mcd.vat
    self.cat = self.mcd.cat
    self.vow = self.mcd.vow
    self.mkr = self.mcd.mkr
    self.dai_join = self.mcd.dai_adapter
    if self.arguments.type == 'flip':
        self.collateral = self.mcd.collaterals[self.arguments.ilk]
        self.ilk = self.collateral.ilk
        self.gem_join = self.collateral.adapter
    else:
        self.collateral = None
        self.ilk = None
        self.gem_join = None

    # Configure auction contracts
    self.flipper = self.collateral.flipper if self.arguments.type == 'flip' else None
    self.flapper = self.mcd.flapper if self.arguments.type == 'flap' else None
    self.flopper = self.mcd.flopper if self.arguments.type == 'flop' else None
    self.urn_history = None

    if self.flipper:
        self.min_flip_lot = Wad.from_number(self.arguments.min_flip_lot)
        self.strategy = FlipperStrategy(self.flipper, self.min_flip_lot)
        self.urn_history = UrnHistory(self.web3, self.mcd, self.ilk,
                                      self.arguments.from_block,
                                      self.arguments.vulcanize_endpoint)
    elif self.flapper:
        self.strategy = FlapperStrategy(self.flapper, self.mkr.address)
    elif self.flopper:
        self.strategy = FlopperStrategy(self.flopper)
    else:
        raise RuntimeError("Please specify auction type")

    # Create the collection used to manage auctions relevant to this keeper
    self.auctions = Auctions(flipper=self.flipper.address if self.flipper else None,
                             flapper=self.flapper.address if self.flapper else None,
                             flopper=self.flopper.address if self.flopper else None,
                             model_factory=ModelFactory(' '.join(self.arguments.model)))
    self.auctions_lock = threading.Lock()
    self.dead_since = {}
    self.lifecycle = None

    # logging.basicConfig(format='%(asctime)-15s %(levelname)-8s %(message)s',
    #                     level=(logging.DEBUG if self.arguments.debug else logging.INFO))

    # Create gas strategy used for non-bids and bids which do not supply gas price
    self.gas_price = DynamicGasPrice(self.arguments)

    self.vat_dai_target = Wad.from_number(self.arguments.vat_dai_target) \
        if self.arguments.vat_dai_target is not None else None

    # Configure account(s) for which we'll deal auctions
    self.deal_all = False
    self.deal_for = set()
    if self.arguments.deal_for is None:
        self.deal_for.add(self.our_address)
    elif len(self.arguments.deal_for) == 1 and self.arguments.deal_for[0].upper() in ["ALL", "NONE"]:
        if self.arguments.deal_for[0].upper() == "ALL":
            self.deal_all = True
        # else no auctions will be dealt
    elif len(self.arguments.deal_for) > 0:
        for account in self.arguments.deal_for:
            self.deal_for.add(Address(account))

    # reduce logspew
    setup_logging(self.arguments)
import traceback
import threading
import time
from datetime import datetime
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

from wpscan_out_parse.formatter import format_results

from wpwatcher import log, VERSION
from wpwatcher.utils import get_valid_filename, replace

# Date format used everywhere
DATE_FORMAT = '%Y-%m-%dT%H-%M-%S'

# Sendmail calls are made one at a time so as not to overload the server
# and create connection errors
mail_lock = threading.Lock()


class WPWatcherNotification():
    '''Send conditions logic + build and send mail reports'''

    def __init__(self, conf):
        # store specific mailserver values
        self.from_email = conf['from_email']
        self.smtp_server = conf['smtp_server']
        self.smtp_ssl = conf['smtp_ssl']
        self.smtp_auth = conf['smtp_auth']
        self.smtp_user = conf['smtp_user']
        self.smtp_pass = conf['smtp_pass']

        # store specific notification values
        self.send_email_report = conf['send_email_report']
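# A sketch of how mail_lock is plausibly used where the report is actually
# sent. smtplib is standard library; the send_report name and the message
# argument are illustrative, not from the source.
import smtplib

def send_report(self, message):
    # Serialize SMTP sessions: one thread talks to the server at a time,
    # avoiding the connection errors the comment above mentions.
    with mail_lock:
        server = smtplib.SMTP(self.smtp_server)
        try:
            if self.smtp_ssl:
                server.starttls()
            if self.smtp_auth:
                server.login(self.smtp_user, self.smtp_pass)
            server.send_message(message)
        finally:
            server.quit()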
import locale
import threading
import time
import requests
import json
import traceback
import feedparser

from PIL import Image, ImageTk
from contextlib import contextmanager

your_province = ''  # Seoul, Jeonbuk, Daejeon, Jeonnam - set whichever applies to you
gov_api_key = ''  # Type your OpenAPI key for showing microdust
microdust_req_url = 'http://openapi.airkorea.or.kr/openapi/services/rest/ArpltnInforInqireSvc/getCtprvnMesureSidoLIst?serviceKey=%s&numOfRows=10&pageNo=1&sidoName=%s&searchCondition=DAILY&_returnType=json' % (gov_api_key, your_province)

LOCALE_LOCK = threading.Lock()

ip_req_url = "http://ip-api.com/json"
ip_data = requests.get(ip_req_url).text
ip_data_region = json.loads(ip_data)

city_code = '1842025'
ui_locale = ''  # '' as default
time_format = 12  # 12 or 24
date_format = "%b %d, %Y"  # check python doc for strftime() for options
news_country_code = 'kr'
weather_api_token = ''  # openweathermap API key
# google_map_api = ''
currentlong = ip_data_region['lon']
currentlat = ip_data_region['lat']
latitude = None  # Set this if IP location lookup does not work for you (must be a string)
longitude = None  # Set this if IP location lookup does not work for you (must be a string)
xlarge_text_size = 110
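# LOCALE_LOCK presumably guards locale.setlocale(), which mutates
# process-global state and is not thread-safe. The conventional pattern,
# using the contextmanager import above (an assumed usage, not shown in the
# excerpt):
@contextmanager
def setlocale(name):
    with LOCALE_LOCK:
        saved = locale.setlocale(locale.LC_ALL)  # remember the current locale
        try:
            yield locale.setlocale(locale.LC_ALL, name)
        finally:
            locale.setlocale(locale.LC_ALL, saved)  # always restore it

# Example: format a date in a specific locale without racing other threads.
# with setlocale('ko_KR.UTF-8'):
#     print(time.strftime(date_format))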
import threading
import uuid
import sys
import time

# select * from MicroServiceChainChoice JOIN MicroServiceChains on chainAvailable = MicroServiceChains.pk;
# | pk | choiceAvailableAtLink | chainAvailable | pk | startingLink | description

from linkTaskManager import linkTaskManager
from taskStandard import taskStandard
import jobChain
import databaseInterface
import lxml.etree as etree
import os
import archivematicaMCP

global choicesAvailableForUnits
choicesAvailableForUnits = {}
choicesAvailableForUnitsLock = threading.Lock()


class linkTaskManagerLoadMagicLink:
    """Load a link from the unit to process.
    Deprecated! Replaced with Set/Load Unit Variable"""

    def __init__(self, jobChainLink, pk, unit):
        self.pk = pk
        self.jobChainLink = jobChainLink
        self.UUID = uuid.uuid4().__str__()
        self.unit = unit

        ### Update the unit
        magicLink = self.unit.getMagicLink()
        if magicLink != None:
            link, exitStatus = magicLink
            self.jobChainLink.setExitMessage("Completed successfully")
def main():
    # --------START Command Line option parser--------------------------------
    usage = "usage: %prog "
    parser = OptionParser(usage=usage)

    h_adsb_ip = "Set ADSB Modem IP [default=%default]"
    h_adsb_port = "Set ADSB Modem Port [default=%default]"
    h_expire = "Set current list expiration timeout [default=%default [sec]]"
    h_gs_lat = "Set GS Latitude [default=%default [deg]]"
    h_gs_lon = "Set GS Longitude [default=%default [deg]]"
    h_gs_alt = "Set GS Altitude [default=%default [km]]"

    parser.add_option("-a", "--adsb_ip", dest="adsb_ip", type="string",
                      default="198.82.148.60", help=h_adsb_ip)
    parser.add_option("-p", "--adsb_port", dest="adsb_port", type="int",
                      default="30003", help=h_adsb_port)
    parser.add_option("-e", "--expire", dest="expire", type="float",
                      default="60", help=h_expire)
    parser.add_option("", "--gs_lat", dest="gs_lat", type="float",
                      default="37.202195", help=h_gs_lat)
    parser.add_option("", "--gs_lon", dest="gs_lon", type="float",
                      default="-80.406851", help=h_gs_lon)
    parser.add_option("", "--gs_alt", dest="gs_alt", type="float",
                      default="0.630936", help=h_gs_alt)
    (options, args) = parser.parse_args()
    # --------END Command Line option parser----------------------------------

    lock = threading.Lock()
    server_thread = Data_Server(options, lock)
    server_thread.daemon = True
    # server_thread.run()  # blocking
    server_thread.start()  # Non-blocking
    # print "Non Block"
    # sys.exit()

    app = QtGui.QApplication(sys.argv)
    win = adsb_gui(lock)
    win.set_callback(server_thread)
    # ex = adsb_gui()
    # server_thread.set_gui_access(ex)
    sys.exit(app.exec_())