def encode_worker(
        realtime_vocoder: RealtimeVocoder,
        time_length: float,
        extra_time: float,
        queue_input: Queue,
        queue_output: Queue,
        acquired_lock: Lock,
):
    logger = logging.getLogger('encode')
    init_logger(logger)
    logger.info('encode worker')

    stream = EncodeStream(vocoder=realtime_vocoder)
    stream_wrapper = StreamWrapper(stream=stream, extra_time=extra_time)

    acquired_lock.release()

    start_time = extra_time
    while True:
        item: Item = queue_input.get()
        start = time.time()

        wave: numpy.ndarray = item.item
        stream.add(start_time=start_time, data=wave)
        start_time += time_length

        feature_wrapper: AcousticFeatureWrapper = stream_wrapper.process_next(
            time_length=time_length)
        item.item = feature_wrapper
        queue_output.put(item)

        logger.debug(f'{item.index}: {time.time() - start}')
def f(lock: Lock, i: int):
    lock.acquire()
    try:
        print('hello world', i)
        print('hello world', i + 1)
    finally:
        lock.release()
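# Editor's note: a minimal equivalent sketch (not from the original sources)
# of f() above with the lock used as a context manager. Both threading.Lock
# and multiprocessing.Lock support the with-statement, which releases the
# lock even if the body raises; the function name here is illustrative.
def f_with_statement(lock: Lock, i: int):
    with lock:
        print('hello world', i)
        print('hello world', i + 1)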
def __init__(self):
    self._reader, self._writer = Pipe(duplex=False)
    self._rlock = Lock()
    if sys.platform == 'win32':
        self._wlock = None
    else:
        self._wlock = Lock()
    self._make_methods()
def decode_worker(
        realtime_vocoder: RealtimeVocoder,
        time_length: float,
        extra_time: float,
        vocoder_buffer_size: int,
        out_audio_chunk: int,
        output_silent_threshold: float,
        queue_input: Queue,
        queue_output: Queue,
        acquired_lock: Lock,
):
    logger = logging.getLogger('decode')
    init_logger(logger)
    logger.info('decode worker')

    realtime_vocoder.create_synthesizer(
        buffer_size=vocoder_buffer_size,
        number_of_pointers=16,
    )
    stream = DecodeStream(vocoder=realtime_vocoder)
    stream_wrapper = StreamWrapper(stream=stream, extra_time=extra_time)

    acquired_lock.release()

    start_time = extra_time
    wave_fragment = numpy.empty(0)
    while True:
        item: Item = queue_input.get()
        start = time.time()

        feature: AcousticFeature = item.item
        stream.add(
            start_time=start_time,
            data=feature,
        )
        start_time += time_length

        wave = stream_wrapper.process_next(time_length=time_length)
        wave_fragment = numpy.concatenate([wave_fragment, wave])
        if len(wave_fragment) >= out_audio_chunk:
            wave, wave_fragment = wave_fragment[:out_audio_chunk], wave_fragment[out_audio_chunk:]

            power = librosa.core.power_to_db(numpy.abs(librosa.stft(wave)) ** 2).mean()
            if power < -output_silent_threshold:
                wave = None  # chunk is silent, drop it
        else:
            wave = None  # not enough samples buffered for a full chunk yet

        item.item = wave
        queue_output.put(item)

        logger.debug(f'{item.index}: {time.time() - start}')
class AudioOutputLoop(Thread):
    def __init__(self, audioDevice, sampleRate=44100, bufferSize=512):
        Thread.__init__(self, name="AudioOutputLoop")
        self._lock = Lock()
        self._isRunning = False
        self._audioDevice = audioDevice
        self._bufferSize = bufferSize
        self._sampleRate = sampleRate
        self._maxTimePerBlockInSec = bufferSize / sampleRate

        pygame.mixer.init(deviceName=audioDevice.name,
                          frequency=sampleRate,
                          buffer=bufferSize)
        self._channel = pygame.mixer.find_channel()

        self.testWave = wave.open("./tests/resources/test.wav", "r")
        self.totalFrames = self.testWave.getnframes()
        self.allsamples = self.testWave.readframes(self.totalFrames)
        self._byteArray = bytearray(self._bufferSize * 2)
        self.numFrames = 0
        logger.info("Mixer initialized")

    def terminate(self):
        pygame.mixer.quit()

    def run(self):
        logger.debug("AudioOutputLoop started")
        self._isRunning = True
        while self._isRunning:
            startTime = clock()
            self._lock.acquire(True)
            # do cool stuff here
            if self.numFrames + self._bufferSize > self.totalFrames:
                self._isRunning = False
            else:
                startFrame = self.numFrames
                endFrame = startFrame + self._bufferSize * 2
                bytesOut = bytes(self.allsamples[startFrame:endFrame])
                sound = pygame.mixer.Sound(self.allsamples[startFrame:endFrame])
                self._channel.play(sound)
                self.numFrames += self._bufferSize * 2
            stopTime = clock()
            timeInBlockInSec = (stopTime - startTime)
            sleepTime = self._maxTimePerBlockInSec - timeInBlockInSec
            if sleepTime < 0:
                logger.warn("Audio dropout!")
            else:
                logger.info("CPU: %f", 100 * timeInBlockInSec / self._maxTimePerBlockInSec)
                sleep(sleepTime)
            self._lock.release()
def convert_worker(
        acoustic_converter: AcousticConverter,
        super_resolution: SuperResolution,
        time_length: float,
        extra_time: float,
        input_silent_threshold: float,
        queue_input: Queue,
        queue_output: Queue,
        acquired_lock: Lock,
):
    logger = logging.getLogger('convert')
    init_logger(logger)
    logger.info('convert worker')

    chainer.global_config.enable_backprop = False
    chainer.global_config.train = False

    stream = ConvertStream(voice_changer=VoiceChanger(
        super_resolution=super_resolution,
        acoustic_converter=acoustic_converter,
        threshold=input_silent_threshold,
    ))
    stream_wrapper = StreamWrapper(stream=stream, extra_time=extra_time)

    acquired_lock.release()

    start_time = extra_time
    while True:
        item: Item = queue_input.get()
        start = time.time()

        in_feature: AcousticFeatureWrapper = item.item
        stream.add(
            start_time=start_time,
            data=in_feature,
        )
        start_time += time_length

        out_feature = stream_wrapper.process_next(time_length=time_length)
        item.item = out_feature
        queue_output.put(item)

        logger.debug(f'{item.index}: {time.time() - start}')
def test_get_instance_with_concurrent_mode(self, mr_lock: LockFactory):
    try:
        _lock = mr_lock.get_instance()
    except ValueError as ve:
        assert "FeatureMode is None. Please configure it as one of 'multirunnable.mode.FeatureMode'." in str(ve), \
            "It should set the FeatureMode first."

    mr_lock.feature_mode = FeatureMode.Concurrent
    _lock = mr_lock.get_instance()

    from threading import Lock
    assert _lock is not None and isinstance(_lock, type(Lock())) is True, \
        "This type of Lock instance should be 'threading.Lock'."
def __init__(self, maxsize=0):
    if maxsize <= 0:
        maxsize = _multiprocessing.SemLock.SEM_VALUE_MAX
    self._maxsize = maxsize
    self._reader, self._writer = Pipe(duplex=False)
    self._rlock = Lock()
    self._opid = os.getpid()
    if sys.platform == 'win32':
        self._wlock = None
    else:
        self._wlock = Lock()
    self._sem = BoundedSemaphore(maxsize)

    self._after_fork()

    if sys.platform != 'win32':
        register_after_fork(self, Queue._after_fork)
def __init__(self, maxsize=0):
    if maxsize <= 0:
        maxsize = 2147483647L
    self._maxsize = maxsize
    self._reader, self._writer = Pipe(duplex=False)
    self._rlock = Lock()
    self._opid = os.getpid()
    if sys.platform == 'win32':
        self._wlock = None
    else:
        self._wlock = Lock()
    self._sem = BoundedSemaphore(maxsize)

    self._after_fork()

    if sys.platform != 'win32':
        register_after_fork(self, Queue._after_fork)
class MidiEventLoop(Thread):
    def __init__(self, delegate, pollIntervalInMs=25):
        Thread.__init__(self, name="MidiEventLoop")
        self._lock = Lock()
        self._isRunning = False
        self.delegate = delegate
        self._pollInterval = pollIntervalInMs / 1000
        self.devices = None
        self.midiMappings = MidiMappingTable(PathFinder.findUserFile('midimappings.sql'))

    def quit(self):
        self._lock.acquire(True)
        if self.devices is not None:
            self.devices.closeAll()
        self._isRunning = False
        self._lock.release()
        logger.info("Closed all MIDI devices")

    def run(self):
        # Initialize is done here so as not to block the main thread
        self.devices = MidiDeviceList()
        self.devices.openAll()

        logger.debug("MidiEventLoop started")
        self._isRunning = True
        while self._isRunning:
            self._lock.acquire(True)
            for device in self.devices.openedInputs():
                while device.poll():
                    self._parseEvent(device.readEvents())
            self._lock.release()
            sleep(self._pollInterval)
        pygame.midi.quit()

    def _parseEvent(self, eventList):
        eventData = eventList[0][0]
        midiEvent = MidiEvent(eventData[0], eventData[1], eventData[2], eventList[0][1])
        self.midiMappings.process(midiEvent, self.delegate)
#!/usr/bin/env python
# coding=utf-8
# Python 3.6
from multiprocessing.synchronize import Lock

from PIL import Image

from utils import *

# In Python 3, multiprocessing.synchronize.Lock requires a context;
# ctx=None falls back to the default context.
pathlock = Lock(ctx=None)  # type: Lock


class TextureConvert(object):
    def __init__(self):
        self.args = get_args()
        self.tool = "PVRTexToolCLI"
        pass

    def execute(self):
        print("BuildTool execute >")
        pass

    def get_texture_format(self, option, exists_alpha=True):
        args = get_args()
        if args.convert_tool == "PVRTexToolCLI":
            if option == "ETC1":
                return "ETC1"
            elif option == "ETC2":
                if exists_alpha:
                    return "ETC2_RGBA"
                else:
                    return "ETC2_RGB"
        elif args.convert_tool == "etctool":
def process(self, artifact: Artifact,
            discovery: Dict[str, DiscoveryState],
            objects: List[Artifact],
            lock: LockT) -> List[Artifact]:
    new_artifacts: List[Artifact] = []
    new_discovery: Dict[str, DiscoveryState] = {}
    new_objects: List[Artifact] = []

    # Identify tasks that are completely new
    if (artifact.state is New
            and artifact.filetype is Unknown):
        # Use the pathmap list to work out the
        # filetype and starting state
        new_artifact = None
        for pathmap in self._pathmaps:
            if artifact.location in pathmap:
                new_artifact = Artifact(artifact.location,
                                        pathmap.filetype,
                                        pathmap.state)
        # Assuming we found a match and were able
        # to create the artifact, return it so that
        # it can be added to the queue
        if new_artifact is not None:
            # Also store its hash in the file database
            file_info = FileInfoDatabase(self._database)
            file_info.add_file_info(artifact.location,
                                    new_artifact.hash)
            new_artifacts.append(new_artifact)

    elif artifact.state is Analysed:
        # Work out whether this artifact needs to be
        # included in the build or not - if any of its
        # definitions are mentioned in the (shared)
        # discovery mapping, or if it is defining
        # the target of the build then it should be included
        #
        # TODO: Looping through a list of what could
        # eventually contain every unit/symbol in the build has
        # the potential to become an issue for performance.
        # Longer term we probably want to drop using the shared
        # discovery array in favour of database lookups
        required = False
        for definition in artifact.defines:
            # Is this the target?
            if (definition == self.target
                    or definition in discovery):
                required = True
                break

        if required:
            # Update the discovery list to indicate that
            # the definitions from this Artifact are present
            # (but not yet compiled)
            for definition in artifact.defines:
                if definition not in discovery:
                    new_discovery[definition] = DiscoveryState.SEEN

            # Now check whether the Artifact's dependencies
            # have already been seen and compiled
            compiled = [False] * len(artifact.depends_on)
            for idep, dependency in enumerate(artifact.depends_on):
                # Only applies to str dependencies
                if isinstance(dependency, Path):
                    continue
                if dependency in discovery:
                    # Are the dependencies compiled?
                    if discovery[dependency] == DiscoveryState.COMPILED:
                        compiled[idep] = True
                else:
                    # If the dependency isn't in the list at all yet
                    # then add an entry so the system knows we are
                    # expecting it later (for the above check)
                    new_discovery[dependency] = DiscoveryState.AWARE_OF

            # If the dependencies are satisfied (or there weren't
            # any) then this file can be compiled now
            if len(compiled) == 0 or all(compiled):
                for definition in artifact.defines:
                    task = self._taskmap[(artifact.filetype,
                                          artifact.state)]
                    new_artifacts.extend(task.run([artifact]))
                    new_discovery[definition] = DiscoveryState.COMPILED
            else:
                # If the dependencies weren't all satisfied then
                # back on the queue for another pass later
                new_artifacts.append(artifact)
        else:
            # If it wasn't required it could be later, so
            # put it back on the queue, unless the target
            # has been compiled, in which case it wasn't
            # needed at all!
            if (self._target not in discovery
                    or discovery[self._target] != DiscoveryState.COMPILED):
                new_artifacts.append(artifact)

    elif artifact.state is Compiled:
        # Begin populating the list for linking
        new_objects.append(artifact)
        # But do not return a new artifact - this object
        # is "done" as far as the processing is concerned
        #
        # But, if this is the file containing the target
        # that means everything must have been compiled
        # by this point; so we can do the linking step
        if self._target in artifact.defines:
            task = self._taskmap[(artifact.filetype,
                                  artifact.state)]
            new_artifacts.extend(task.run(objects + [artifact]))

    elif artifact.state is Linked:
        # Nothing to do at present with the final linked
        # executable, but included here for completeness
        pass

    else:
        # If the object specifies any paths in its dependencies
        # then these must exist before it can be processed
        # TODO: This needs more thorough logic and to come from
        # the database eventually
        ready = True
        for dependency in artifact.depends_on:
            if isinstance(dependency, Path):
                if not dependency.exists():
                    ready = False

        if ready:
            # An artifact with a filetype and state set
            # will have an appropriate task that should
            # be used to run it (though unlike the old
            # implementation this is probably returning
            # the instance of the Task not the class)
            if ((artifact.filetype, artifact.state)
                    in self._taskmap):
                task = self._taskmap[(artifact.filetype,
                                      artifact.state)]
                new_artifacts.extend(task.run([artifact]))
        else:
            new_artifacts.append(artifact)

    # Update shared arrays
    lock.acquire()
    objects.extend(new_objects)
    for key, value in new_discovery.items():
        discovery[key] = value
    lock.release()

    return new_artifacts
def __init__(self):
    self._events = []
    self._lock = Lock()
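# Editor's note: a hypothetical sketch of how a class initialized like the
# __init__ above might guard its event list; the add() and drain() names are
# illustrative assumptions, not the original snippet's API.
def add(self, event):
    with self._lock:  # serialize writers
        self._events.append(event)

def drain(self):
    with self._lock:  # swap the list out atomically
        events, self._events = self._events, []
    return events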
# limitations under the License.
import logging
import uuid
from configparser import NoOptionError

import nexusproto.DataTile_pb2 as nexusproto
import numpy as np
from cassandra.auth import PlainTextAuthProvider
from cassandra.cqlengine import columns, connection, CQLEngineException
from cassandra.cqlengine.models import Model
from cassandra.policies import TokenAwarePolicy, DCAwareRoundRobinPolicy, WhiteListRoundRobinPolicy
from multiprocessing.synchronize import Lock
from nexusproto.serialization import from_shaped_array

INIT_LOCK = Lock(ctx=None)

logger = logging.getLogger(__name__)


class NexusTileData(Model):
    __table_name__ = 'sea_surface_temp'
    tile_id = columns.UUID(primary_key=True)
    tile_blob = columns.Blob()

    __nexus_tile = None

    def _get_nexus_tile(self):
        if self.__nexus_tile is None:
            self.__nexus_tile = nexusproto.TileData.FromString(self.tile_blob)
        return self.__nexus_tile
from ObexSender.ObexSender import ObexSender
import ProximusAgent
from ProximusAgent import ProximusAgent
import getopt
#from encodings.punycode import adapt


def DeviceFound(address, properties):
    if not queue.has_key(address):
        queue[address] = properties
        #print "# Queued %s Name: %s" % (address, properties["Name"])


manager = bluez.Manager('gobject')
adapter = manager.DefaultAdapter()
queueLock = Lock()
queue = {}
logger = None


def MePropertyChanged(name, value):
    global adapter
    global logger
    if (logger != None):
        logger.debug("# - property changed (%s: %s)" % (name, value))
    if (name == "Discovering"):
        if (value == 1):
            if (logger != None):
                logger.debug("# - queue sleep 10")
            time.sleep(10)
            queueLock.acquire()
class Queue(object):
    def __init__(self, maxsize=0):
        if maxsize <= 0:
            maxsize = _multiprocessing.SemLock.SEM_VALUE_MAX
        self._maxsize = maxsize
        self._reader, self._writer = Pipe(duplex=False)
        self._rlock = Lock()
        self._opid = os.getpid()
        if sys.platform == 'win32':
            self._wlock = None
        else:
            self._wlock = Lock()
        self._sem = BoundedSemaphore(maxsize)

        self._after_fork()

        if sys.platform != 'win32':
            register_after_fork(self, Queue._after_fork)

    def __getstate__(self):
        assert_spawning(self)
        return (self._maxsize, self._reader, self._writer,
                self._rlock, self._wlock, self._sem, self._opid)

    def __setstate__(self, state):
        (self._maxsize, self._reader, self._writer,
         self._rlock, self._wlock, self._sem, self._opid) = state
        self._after_fork()

    def _after_fork(self):
        debug('Queue._after_fork()')
        self._notempty = threading.Condition(threading.Lock())
        self._buffer = collections.deque()
        self._thread = None
        self._jointhread = None
        self._joincancelled = False
        self._closed = False
        self._close = None
        self._send = self._writer.send
        self._recv = self._reader.recv
        self._poll = self._reader.poll

    def put(self, obj, block=True, timeout=None):
        assert not self._closed
        if not self._sem.acquire(block, timeout):
            raise Full

        self._notempty.acquire()
        try:
            if self._thread is None:
                self._start_thread()
            self._buffer.append(obj)
            self._notempty.notify()
        finally:
            self._notempty.release()

    def get(self, block=True, timeout=None):
        if block and timeout is None:
            self._rlock.acquire()
            try:
                res = self._recv()
                self._sem.release()
                return res
            finally:
                self._rlock.release()
        else:
            if block:
                deadline = time.time() + timeout
            if not self._rlock.acquire(block, timeout):
                raise Empty
            try:
                if not self._poll(block and (deadline - time.time()) or 0.0):
                    raise Empty
                res = self._recv()
                self._sem.release()
                return res
            finally:
                self._rlock.release()

    def qsize(self):
        # Raises NotImplementedError on Mac OSX because of broken sem_getvalue()
        return self._maxsize - self._sem._semlock._get_value()

    def empty(self):
        return not self._poll()

    def full(self):
        return self._sem._semlock._is_zero()

    def get_nowait(self):
        return self.get(False)

    def put_nowait(self, obj):
        return self.put(obj, False)

    def close(self):
        self._closed = True
        self._reader.close()
        if self._close:
            self._close()

    def join_thread(self):
        debug('Queue.join_thread()')
        assert self._closed
        if self._jointhread:
            self._jointhread()

    def cancel_join_thread(self):
        debug('Queue.cancel_join_thread()')
        self._joincancelled = True
        try:
            self._jointhread.cancel()
        except AttributeError:
            pass

    def _start_thread(self):
        debug('Queue._start_thread()')

        # Start thread which transfers data from buffer to pipe
        self._buffer.clear()
        self._thread = threading.Thread(
            target=Queue._feed,
            args=(self._buffer, self._notempty, self._send,
                  self._wlock, self._writer.close),
            name='QueueFeederThread')
        self._thread.daemon = True

        debug('doing self._thread.start()')
        self._thread.start()
        debug('... done self._thread.start()')

        # On process exit we will wait for data to be flushed to pipe.
        #
        # However, if this process created the queue then all
        # processes which use the queue will be descendants of this
        # process.  Therefore waiting for the queue to be flushed
        # is pointless once all the child processes have been joined.
        created_by_this_process = (self._opid == os.getpid())
        if not self._joincancelled and not created_by_this_process:
            self._jointhread = Finalize(
                self._thread, Queue._finalize_join,
                [weakref.ref(self._thread)],
                exitpriority=-5)

        # Send sentinel to the thread queue object when garbage collected
        self._close = Finalize(
            self, Queue._finalize_close,
            [self._buffer, self._notempty],
            exitpriority=10)

    @staticmethod
    def _finalize_join(twr):
        debug('joining queue thread')
        thread = twr()
        if thread is not None:
            thread.join()
            debug('... queue thread joined')
        else:
            debug('... queue thread already dead')

    @staticmethod
    def _finalize_close(buffer, notempty):
        debug('telling queue thread to quit')
        notempty.acquire()
        try:
            buffer.append(_sentinel)
            notempty.notify()
        finally:
            notempty.release()

    @staticmethod
    def _feed(buffer, notempty, send, writelock, close):
        debug('starting thread to feed data to pipe')
        from .util import is_exiting

        nacquire = notempty.acquire
        nrelease = notempty.release
        nwait = notempty.wait
        bpopleft = buffer.popleft
        sentinel = _sentinel
        if sys.platform != 'win32':
            wacquire = writelock.acquire
            wrelease = writelock.release
        else:
            wacquire = None

        try:
            while 1:
                nacquire()
                try:
                    if not buffer:
                        nwait()
                finally:
                    nrelease()
                try:
                    while 1:
                        obj = bpopleft()
                        if obj is sentinel:
                            debug('feeder thread got sentinel -- exiting')
                            close()
                            return

                        if wacquire is None:
                            send(obj)
                        else:
                            wacquire()
                            try:
                                send(obj)
                            finally:
                                wrelease()
                except IndexError:
                    pass
        except Exception, e:
            # Since this runs in a daemon thread the resources it uses
            # may be become unusable while the process is cleaning up.
            # We ignore errors which happen after the process has
            # started to cleanup.
            try:
                if is_exiting():
                    info('error in queue thread: %s', e)
                else:
                    import traceback
                    traceback.print_exc()
            except Exception:
                pass
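# Editor's note: an illustrative usage sketch (not part of the original
# sources) of the behaviour the Queue class above implements: put() only
# appends to a local buffer and a daemon feeder thread writes to the pipe,
# so a producer should close() the queue and join_thread() to flush any
# pending items before exiting.
from multiprocessing import Process, Queue


def producer(q):
    for n in range(5):
        q.put(n)
    q.close()          # no further puts from this process
    q.join_thread()    # wait for the feeder thread to drain the buffer


if __name__ == '__main__':
    q = Queue()
    p = Process(target=producer, args=(q,))
    p.start()
    print([q.get() for _ in range(5)])
    p.join()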
def main(argv=None):  # IGNORE:C0111
    '''Command line options.'''

    if argv is None:
        argv = sys.argv
    else:
        sys.argv.extend(argv)

    parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument("-v", "--verbose", dest="verbose", action="count",
                        help="set verbosity level [default: %(default)s]")
    # parser.add_argument("-dir", "--structs_dir", required = True )
    parser.add_argument("-db", "--database_name", default='pdb')
    parser.add_argument("-host", "--db_host", default='127.0.0.1')
    parser.add_argument("--procesados", default='/tmp/pdbs_dist_procesados.txt')
    parser.add_argument("--domains",
                        default='/data/databases/pdb/processed/dns_pdbs.tlb')
    parser.add_argument("--seqs",
                        default='/data/databases/pdb/processed/pdb_seq_res.fasta')
    parser.add_argument("--pdbs", default='/data/databases/pdb/')
    parser.add_argument("--distances",
                        default='/data/databases/pdb/processed/distances.tbl',
                        help="Final output: table with atom distances between residues and ligands. "
                             "Only for distances less than the 'dist' parameter")
    parser.add_argument("--dist", default=5)
    parser.add_argument("--pdbs_with_drug",
                        default='/data/databases/pdb/processed/pdbs_with_drug.txt',
                        help="Output: list of PDB codes with an associated ligand")

    args = parser.parse_args()

    if not os.path.exists(args.pdbs):
        sys.stderr.write("%s not found. Specify where the pdbs/divided directory is" % args.pdbs)
        sys.exit(1)

    PDB_PATH = args.pdbs
    CONTACT_DIST = args.dist

    pdbs_with_drug_path = args.pdbs_with_drug
    if not os.path.exists(os.path.dirname(args.pdbs_with_drug)):
        sys.stderr.write("can't create %s. Set pdbs_with_drug correctly" % pdbs_with_drug_path)
        sys.exit(1)

    if not os.path.exists(os.path.dirname(args.distances)):
        sys.stderr.write("can't create %s. Set distances correctly" % args.distances)
        sys.exit(1)

    pdbs_procesados_path = args.procesados
    print("Processed pdbs are tracked in %s; if the file is deleted, "
          "the process starts from scratch" % pdbs_procesados_path)
    print("Outputs: '%s' and '%s'" % (pdbs_with_drug_path, args.distances))

    pdbs_procesados = []
    if os.path.exists(pdbs_procesados_path):
        with open(pdbs_procesados_path) as handle:
            pdbs_procesados = [x.strip() for x in handle.readlines()]
        pdbs_procesados = {x: 1 for x in pdbs_procesados}

    pdbs_iterator = PDBsIterator(pdb_dir=args.pdbs)

    def not_processed_iter():
        for pdb, pdb_path in pdbs_iterator:
            if pdb not in pdbs_procesados:
                yield [pdb, pdb_path]

    DNsPDBs = args.domains

    if not os.path.exists(DNsPDBs):
        seqs_from_pdb = args.seqs
        if not os.path.exists(seqs_from_pdb):
            sys.stderr.write("%s does not exist and %s not found. Specify where it is."
                             % (DNsPDBs, seqs_from_pdb))
            sys.exit(1)
        sys.stderr.write("%s not found. You can create it with the following command: \n" % DNsPDBs)
        sys.stderr.write("hmmscan --cut_tc --domtblout dns_pdbs.tlb --acc -o pdb_seq_res.hmm Pfam-A.hmm seqs_from_pdb.fasta")
        sys.exit(1)

    drugcompounds = [x for x, y in compound_type.items()
                     if y in ["DRUG", "COFACTOR"]]
    othercompounds = [x for x, y in compound_type.items()
                      if y in ["METAL", "SUGAR", "NUCLEOTIDE", "LIPID"]]
    aminoacidcompounds = [x for x, y in compound_type.items()
                          if y in ["MODIFIED", "RESIDUE"]]
    drugcompounds = othercompounds + drugcompounds

    _log.info("processed pdbs: %i" % len(pdbs_procesados))

    ppb = CaPPBuilder()
    p = PDBParser(PERMISSIVE=1, QUIET=1)

    pdbs_with_drug = []
    if os.path.exists(pdbs_with_drug_path):
        _log.info("pdbs with drugs already loaded")
        with open(pdbs_with_drug_path) as handle:
            for x in handle.readlines():
                pdbs_with_drug.append(x.strip())
    else:
        with open(pdbs_with_drug_path, "a") as handle:
            _log.info("pdbs with drugs will be loaded")
            pdbs = list(pdbs_iterator)
            for pdb, file_path in tqdm(pdbs):
                try:
                    if pdb not in pdbs_with_drug:
                        structure = p.get_structure(pdb, file_path)
                        for res in structure.get_residues():
                            if res.resname in drugcompounds:
                                pdbs_with_drug.append(pdb)
                                handle.write(pdb + "\n")
                                handle.flush()
                                break
                except Exception as ex:
                    print(str(ex))

    # import re
    # dns_table = re.sub(r" +", "\t", "\n".join(
    #     [str(i) + "\t" + x
    #      for i, x in enumerate(open('/data/databases/pdb/processed/dns_pdbs.tlb').readlines())
    #      if not x.startswith("#")]))
    if not os.path.exists(DNsPDBs + "2"):
        cols = ["target_name", "accession", "tlen", "query_name", "accession2",
                "qlen", "E-value", "score1", "bias1", "#", "of", "c-Evalue",
                "i-Evalue", "score2", "bias2", "from1", "to1", "from2", "to2",
                "from3", "to3", "acc"]
        _log.info("correcting hmmer-pdb output")
        regexp = re.compile(" +")
        items = []
        for x in tqdm(open(DNsPDBs).readlines()):
            if not x.startswith("#"):
                line = regexp.split(x)
                items.append(line[0:len(cols)])
                # record = {c: line[i] for i, c in enumerate(cols)}
        df_hmm = pd.DataFrame.from_records(items, columns=cols)
        # df_hmm = pd.read_table('/data/databases/pdb/processed/dns_pdbs.tlb', index_col=None,
        #                        header=None, delimiter=r"\s+", comment="#", names=cols)
        # df_hmm = df_hmm.dropna()
        df_hmm = df_hmm[["accession", "query_name", "from3", "to3"]]
        df_hmm.to_csv(DNsPDBs + "2")
    else:
        df_hmm = pd.read_csv(DNsPDBs + "2")

    df_hmm["pdb"] = list(map(lambda x: x.split("_")[0].lower().strip(), df_hmm["query_name"]))
    df_hmm["chain"] = list(map(lambda x: x.split("_")[1].upper().strip(), df_hmm["query_name"]))
    df_hmm["start_res"] = list(map(lambda x: x.split("_")[2].upper().strip(), df_hmm["query_name"]))
    df_hmm["end_res"] = list(map(lambda x: x.split("_")[3].upper().strip(), df_hmm["query_name"]))

    print(len(df_hmm))

    lock = Lock()

    def centeroid(arr):
        length = len(arr)
        sum_x = np.sum([x.coord[0] for x in arr])
        sum_y = np.sum([x.coord[1] for x in arr])
        sum_z = np.sum([x.coord[2] for x in arr])
        return sum_x / length, sum_y / length, sum_z / length

    def residues_near_drug(drug_centroid, aa_residues):
        residues_near = []
        for r in aa_residues:
            for a in list(r):
                dist = a - Struct(coord=drug_centroid)
                if dist > 20:
                    break
                if dist < 10:
                    residues_near.append(r)
                    break
        return residues_near

    def juan(pdb_raw):
        try:
            pepe(pdb_raw)
        except Exception:
            traceback.print_exc()
        finally:
            with lock:
                pdbs_procesados.append(pdb_raw)
                with open(pdbs_procesados_path, "a") as handle:
                    handle.write(pdb_raw + "\n")

    def pepe(pdb):
        ppb = CaPPBuilder()
        p = PDBParser(PERMISSIVE=1, QUIET=1)
        path_dir = PDB_PATH + "/" + pdb[1:3].lower() + "/"
        path = path_dir + "pdb" + pdb.lower() + ".ent"
        model = list(p.get_structure('X', path))[0]
        for chain_obj in list(model):
            chain = chain_obj.id
            hmm_residues = {}
            pdb_seq = list(model[chain].get_residues())
            if pdb_seq:
                hmm_contacts = {}
                hmm_residues = {}
                hmms = df_hmm[(df_hmm["pdb"] == pdb) & (df_hmm["chain"] == chain)
                              & (df_hmm["start_res"] == str(pdb_seq[0].id[1]))]
                for j, hmm in hmms.iterrows():
                    try:
                        hmm_start = int(hmm["from3"]) - 1
                        hmm_end = int(hmm["to3"]) - 1
                        hmm_chain_name = "_".join(map(str, [
                            hmm["accession"].split(".")[0], hmm["chain"],
                            pdb_seq[hmm_start].id[1], pdb_seq[hmm_end].id[1]]))
                        hmm_contacts[hmm_chain_name] = []
                        hmm_residues.update({
                            res.id[1]: hmm_chain_name
                            for res in pdb_seq[hmm_start:hmm_end]})
                    except IndexError:
                        print(pdb, hmm["accession"], hmm["chain"],
                              hmm_start, hmm_end, pdb_seq)

            aa_residues = []
            drug_molecules = []
            for res_obj in chain_obj.get_residues():
                if res_obj.resname in drugcompounds:
                    drug_molecules.append(res_obj)
                elif res_obj.resname in aminoacidcompounds:
                    aa_residues.append(res_obj)

            for res_drug_obj in drug_molecules:
                drug_centroid = centeroid(list(res_drug_obj))
                near_residues = residues_near_drug(drug_centroid, aa_residues)
                for drug_atom in list(res_drug_obj):
                    for near_residue in near_residues:
                        for residue_atom in list(near_residue):
                            distance = (residue_atom - drug_atom)
                            if distance > 20:
                                break
                            if distance < CONTACT_DIST:
                                with open(args.distances, "a") as handle:
                                    hmm_name = hmm_residues[near_residue.id[1]] \
                                        if near_residue.id[1] in hmm_residues else "NoDn"
                                    fields = [pdb, chain, hmm_name,
                                              near_residue.id[1], near_residue.resname,
                                              residue_atom.serial_number,
                                              res_drug_obj.id[1], res_drug_obj.resname,
                                              drug_atom.serial_number, distance]
                                    handle.write("\t".join(map(str, fields)) + "\n")

    _log.info("processing distances file")
    for x in tqdm(set(pdbs_with_drug)):
        if x not in pdbs_procesados:
            juan(x)
    # pool = ThreadPool(1)
    # pool.map(juan, set(pdbs_with_drug) - set(pdbs_procesados))
    print("Finished!!!")
# limitations under the License.
import logging
import uuid
from ConfigParser import NoOptionError

import nexusproto.DataTile_pb2 as nexusproto
import numpy as np
from cassandra.auth import PlainTextAuthProvider
from cassandra.cqlengine import columns, connection, CQLEngineException
from cassandra.cqlengine.models import Model
from cassandra.policies import TokenAwarePolicy, DCAwareRoundRobinPolicy, WhiteListRoundRobinPolicy
from multiprocessing.synchronize import Lock
from nexusproto.serialization import from_shaped_array

INIT_LOCK = Lock()

logger = logging.getLogger(__name__)


class NexusTileData(Model):
    __table_name__ = 'sea_surface_temp'
    tile_id = columns.UUID(primary_key=True)
    tile_blob = columns.Blob()

    __nexus_tile = None

    def _get_nexus_tile(self):
        if self.__nexus_tile is None:
            self.__nexus_tile = nexusproto.TileData.FromString(self.tile_blob)

        return self.__nexus_tile
def Lock():
    """
    Returns a non-recursive lock object
    """
    from multiprocessing.synchronize import Lock
    return Lock()
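# Editor's note: an illustrative usage sketch, assuming the Lock() factory
# above is importable; it behaves like calling multiprocessing.Lock()
# directly, and the resulting lock can be shared with child processes.
from multiprocessing import Lock, Process


def worker(lock, n):
    with lock:  # only one process prints at a time
        print('worker', n)


if __name__ == '__main__':
    lock = Lock()
    procs = [Process(target=worker, args=(lock, n)) for n in range(3)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()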
#! -*- coding: utf-8 -*-
from rpccore.gen import Links
from thrift.server import TServer
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from multiprocessing.synchronize import Lock

_lock = Lock()


class LinksHandler(Links.InterFace):
    DEFAULT_PORT = 9990

    def getStand(self, predicate, pendings):
        try:
            _lock.acquire()
            print "do something"
            return
        finally:
            _lock.release()


handler = LinksHandler()
processor = Links.Processor(handler)
transport = TSocket.TServerSocket(port=LinksHandler.DEFAULT_PORT)
tfactory = TTransport.TBufferedTransportFactory()
pfactory = TBinaryProtocol.TBinaryProtocolFactory()
server = TServer.TThreadedServer(processor, transport, tfactory, pfactory)
class PollsterBot(Daemon):
    def __init__(self, pid):
        Daemon.__init__(self, pid)
        # Reddit https://praw.readthedocs.io/en/stable/pages/comment_parsing.html
        self.reddit = {}
        self.default_subs = 'pollster_bot'
        self.bot_name = 'pollster_bot'
        self.version = '1.0'
        self.touched_comment_ids = []
        # create logger
        self.logger = logging.getLogger('Pollster_Bot')
        self.logger.setLevel(logging.INFO)
        # File handler set to DEBUG
        fh = logging.FileHandler(filename=os.path.join(
            os.path.dirname(__file__), 'PollsterBotLog.txt'))
        fh.setLevel(logging.DEBUG)
        # create console handler and set level to debug
        ch = logging.StreamHandler()
        ch.setLevel(logging.DEBUG)
        # create formatter
        formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        # add formatter to ch and fh
        ch.setFormatter(formatter)
        fh.setFormatter(formatter)
        # add ch, fh to logger
        self.logger.addHandler(ch)
        self.logger.addHandler(fh)
        self.lock = Lock()
        self.logger.info('Starting Pollster Bot ver. ' + self.version)
        # Huffington post http://elections.huffingtonpost.com/pollster/api
        self.uri = 'http://elections.huffingtonpost.com/pollster/api/charts.json'
        # Set states
        self.states = {}
        self.states = self.load_json_file('data/states.json')
        # phrases
        phrases = self.load_json_file('data/phrases.json')
        self.greetings = phrases['greeting']
        self.winning = phrases['winning']
        self.losing = phrases['losing']
        # keywords to call the pollster bot
        self.keywords = self.load_json_file('data/keywords.json')['keywords']
        # subs
        subs = self.load_json_file('data/subs.json')['subs']
        for sub in subs:
            self.default_subs += '+' + sub
        self.log_in_credentials = self.load_json_file(
            'data/login_credentials.json')

    def login(self):
        self.logger.info('Login started Pollster Bot ver. ' + self.version)
        login_name = self.log_in_credentials['user']
        login_password = self.log_in_credentials['password']
        self.reddit = praw.Reddit(user_agent='Pollster')
        self.reddit.login(login_name, login_password, disable_warning=False)
        self.logger.info('Login Completed Pollster Bot ver. ' + self.version)

    # Returns a dictionary of states.
    def load_json_file(self, filename):
        self.logger.info('Read {} Pollster Bot ver. {}'.format(
            filename, self.version))
        fn = os.path.join(os.path.dirname(__file__), filename)
        with open(fn) as data_file:
            return json.load(data_file)

    def get_greeting(self):
        return random.choice(self.greetings)

    def get_winning(self, winner, points):
        return random.choice(self.winning).format(winner, points)

    def get_losing(self, loser, points):
        return random.choice(self.losing).format(loser, points)

    # Gets all submissions in the subreddit as a generator.
    def get_submissions(self, subreddit, submission_limit=25):
        donald_submissions = self.reddit.get_subreddit(subreddit).get_hot(
            limit=submission_limit)
        return donald_submissions

    @staticmethod
    def get_comments(submission, comment_limit=25):
        submission.replace_more_comments(limit=comment_limit, threshold=0)
        return submission.comments

    def get_flat_comments(self, submission, comment_limit=25):
        try:
            submission.replace_more_comments(limit=comment_limit, threshold=0)
        except requests.exceptions.ConnectionError:
            self.logger.error('Error fetching comments!')
            return None
        return praw.helpers.flatten_tree(submission.comments)

    def get_recent_comments(self, subreddit):
        comments = self.reddit.get_comments(subreddit)
        return praw.helpers.flatten_tree(comments)

    def get_comments_with_helper(self, subreddit):
        comments = praw.helpers.comment_stream(self.reddit, subreddit)
        return praw.helpers.flatten_tree(comments)

    # Gets polls for a state; defaults to the 2016-president race.
    def get_poll_huffington(self, state, page=1, topic='2016-president'):
        poll_params = {'page': page, 'state': state, 'topic': topic}
        my_response = requests.get(self.uri, params=poll_params)
        if my_response.ok:
            polling_data = []
            entry_data = {}
            json_response = my_response.json()
            for entry in json_response:
                entry_data['title'] = entry['title']
                entry_data['state'] = entry['state']
                entry_data['url'] = entry['url']
                entry_data['last_updated'] = entry['last_updated']
                entry_data['estimates'] = []
                for estimate in entry['estimates']:
                    estimate_data = {}
                    estimate_data['choice'] = estimate['choice']
                    estimate_data['value'] = estimate['value']
                    estimate_data['party'] = estimate['party']
                    entry_data['estimates'].append(estimate_data)
                polling_data.append(entry_data)
            return polling_data

    # Returns a list of state abbreviations.
    @staticmethod
    def check_comment_for_dictionary_keys_and_values(comment, dictionary):
        comment_string = comment.body
        matches = []
        abbrevs = []
        # Check full names
        for x in dictionary.keys():
            if x in comment_string and x not in matches:
                matches.append(x)
        for x in dictionary.values():
            if x in comment_string and x not in matches:
                matches.append(x)
        # Return abbreviations
        for match in matches:
            if match in dictionary.values():
                abbrevs.append(match)
            else:
                for key in dictionary.keys():
                    if key == match:
                        abbrevs.append(dictionary[key])
        return abbrevs

    @staticmethod
    def check_word_in_list_in_string(list, string):
        """
        Returns a non-empty list if a word in the list is contained in the string
        :param list:
        :param string:
        :return: an empty list if list has no elements contained in string
        """
        stuff = [string for word in list if (word in string)]
        return stuff

    def header_huffington(self):
        """
        Builds a header for the huffington post output
        :return:
        """
        head = '\n ^^Polls ^^fetched ^^from ^^[http://elections.huffingtonpost.com/](http://elections.huffingtonpost.com/).\n\n'
        head += '***{}***\n\n'.format(self.get_greeting())
        head += '.\n\n'
        head += '.\n\n'
        return head

    def footer(self):
        """
        Builds the bot footer
        :return:
        """
        foot = '\n\nSummon pollster bot by typing in Pollster_Bot and then any state or states.\n\nEx. Pollster Bot CA Texas Maine RI visit [/r/Pollster_Bot](https://www.reddit.com/r/Pollster_Bot/) for more info'
        foot += "\n\n***If you have any feedback on this bot please [Click Here](http://i.imgur.com/YFIri5g.jpg).***"
        foot += '^^Pollster ^^bot ^^ver. ^^{}'.format(self.version)
        return foot

    @staticmethod
    def format_estimates(estimates):
        reply = ''
        reply += '\nChoice | Percentage | Party\n'
        reply += '------|----------|-----\n'
        for estimate in estimates:
            if not estimate['party']:
                estimate['party'] = ''
            reply += '{} | {} | {} \n'.format(estimate['choice'],
                                              str(estimate['value']),
                                              estimate['party'])
        return reply + '\n\n'

    def format_poll(self, poll):
        state = ''
        for name, abb in self.states.items():
            if poll['state'] == abb:
                state = name
        reply = ''
        reply += '\n\n***' + state + ' Poll:' + '***\n\n'
        reply += self.format_estimates(poll['estimates'])
        datetime_string = poll['last_updated']
        dt = parser.parse(datetime_string)
        datetime_string = dt.strftime('%b %d %Y %I:%M%p')
        reply += 'Date of poll: {} ^^GMT\n\n'.format(datetime_string)
        reply += r'^^Link ^^to ^^poll ^^' + str(poll['url'] + '\n\n')
        return reply

    def check_condition(self, comment):
        """
        Checks if we have a keyword in the comment, then if we have a list of states also.
        :param comment:
        :return: whether we should act on the comment, and the state abbreviations found
        """
        if comment.id in self.touched_comment_ids:
            return False, None
        # First check for keywords in the comment; for now we don't care
        # about formatting after the keyword
        has_keyword = self.check_word_in_list_in_string(
            self.keywords, comment.body)
        if not has_keyword:
            return False, None
        # Next we check if we have states or abbreviations
        abbrevs = self.check_comment_for_dictionary_keys_and_values(
            comment, self.states)
        if len(abbrevs) < 1:
            return False, None
        if str(comment.author) == self.bot_name:
            return False, None
        for reply in comment.replies:
            if str(reply.author) == self.bot_name:
                return False, None
        return True, abbrevs

    def bot_action(self, comment, abbrevs):
        self.lock.acquire()
        response = self.header_huffington()
        done_reqs = []
        for abbrev in abbrevs:
            if abbrev not in done_reqs:
                done_reqs.append(abbrev)
                polls = self.get_poll_huffington(abbrev)
                for poll in polls:
                    response += self.format_poll(poll)
        response += self.footer()
        try:
            comment.reply(response)
            self.touched_comment_ids.append(comment.id)
            # log
            log_out = ''
            log_out += 'Time: {} \nAuthor: {} \nBody: {}\n States: {} \nResponse: {} \n'.format(
                (datetime.timedelta(milliseconds=(time.time()))),
                comment.author, comment.body, abbrevs, response)
            self.logger.info(log_out)
        except praw.errors.RateLimitExceeded:
            self.logger.warn("RateLimitExceeded!!! Response not posted!!!")
        finally:
            self.lock.release()

    def slow_loop(self):
        submissions = self.get_submissions(self.default_subs,
                                           submission_limit=100)
        for submission in submissions:
            self.logger.info(u'Crawling Submission ' + submission.title)
            time_start = time.time()
            comments = self.get_recent_comments(submission)
            for comment in comments:
                check, abbrevs = self.check_condition(comment)
                if check:
                    self.bot_action(comment, abbrevs)
            time_end = time.time()
            crawl_time = int(time_end) - int(time_start)
            crawl_string = 'Crawl time: ' + str(crawl_time) + ' seconds'
            self.logger.info(crawl_string)

    def main_loop(self):
        for comment in self.get_recent_comments(self.default_subs):
            check, abbrevs = self.check_condition(comment)
            if check:
                self.bot_action(comment, abbrevs)
            time.sleep(2)

    def run_forever(self):
        self.logger.info('Forever Loop started Pollster Bot ver. ' + self.version)
        self.login()
        while 1 < 2:
            self.main_loop()

    def run(self):
        self.logger.info('Running Pollster Bot ver. ' + self.version)
        self.run_forever()