def f(lock: Lock, i: int):
    lock.acquire()
    try:
        print('hello world', i)
        print('hello world', i + 1)
    finally:
        lock.release()
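# --- Usage sketch (added; not part of the original source). A minimal example,
# using only the stdlib, of driving f() from a few threads so the two prints
# from each call are never interleaved. The thread count is an illustrative
# assumption.
from threading import Lock, Thread

if __name__ == '__main__':
    lock = Lock()
    threads = [Thread(target=f, args=(lock, n)) for n in range(4)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
# The acquire/try/finally/release pattern in f() is equivalent to the more
# idiomatic `with lock:` block, which also releases the lock on an exception.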
def encode_worker(
        realtime_vocoder: RealtimeVocoder,
        time_length: float,
        extra_time: float,
        queue_input: Queue,
        queue_output: Queue,
        acquired_lock: Lock,
):
    logger = logging.getLogger('encode')
    init_logger(logger)
    logger.info('encode worker')

    stream = EncodeStream(vocoder=realtime_vocoder)
    stream_wrapper = StreamWrapper(stream=stream, extra_time=extra_time)

    acquired_lock.release()
    start_time = extra_time
    while True:
        item: Item = queue_input.get()
        start = time.time()
        wave: numpy.ndarray = item.item

        stream.add(start_time=start_time, data=wave)
        start_time += time_length

        feature_wrapper: AcousticFeatureWrapper = stream_wrapper.process_next(
            time_length=time_length)
        item.item = feature_wrapper
        queue_output.put(item)
        logger.debug(f'{item.index}: {time.time() - start}')
def decode_worker(
        realtime_vocoder: RealtimeVocoder,
        time_length: float,
        extra_time: float,
        vocoder_buffer_size: int,
        out_audio_chunk: int,
        output_silent_threshold: float,
        queue_input: Queue,
        queue_output: Queue,
        acquired_lock: Lock,
):
    logger = logging.getLogger('decode')
    init_logger(logger)
    logger.info('decode worker')

    realtime_vocoder.create_synthesizer(
        buffer_size=vocoder_buffer_size,
        number_of_pointers=16,
    )
    stream = DecodeStream(vocoder=realtime_vocoder)
    stream_wrapper = StreamWrapper(stream=stream, extra_time=extra_time)

    acquired_lock.release()
    start_time = extra_time
    wave_fragment = numpy.empty(0)
    while True:
        item: Item = queue_input.get()
        start = time.time()
        feature: AcousticFeature = item.item

        stream.add(start_time=start_time, data=feature)
        start_time += time_length

        wave = stream_wrapper.process_next(time_length=time_length)

        wave_fragment = numpy.concatenate([wave_fragment, wave])
        if len(wave_fragment) >= out_audio_chunk:
            wave, wave_fragment = (wave_fragment[:out_audio_chunk],
                                   wave_fragment[out_audio_chunk:])

            power = librosa.core.power_to_db(
                numpy.abs(librosa.stft(wave)) ** 2).mean()
            if power < -output_silent_threshold:
                wave = None  # silent chunk: suppress the output
        else:
            wave = None  # not enough samples buffered for a full chunk yet

        item.item = wave
        queue_output.put(item)
        logger.debug(f'{item.index}: {time.time() - start}')
import logging
import wave
from threading import Lock, Thread
from time import perf_counter, sleep  # time.clock() was removed in Python 3.8

import pygame

logger = logging.getLogger(__name__)


class AudioOutputLoop(Thread):
    def __init__(self, audioDevice, sampleRate=44100, bufferSize=512):
        Thread.__init__(self, name="AudioOutputLoop")
        self._lock = Lock()
        self._isRunning = False
        self._audioDevice = audioDevice
        self._bufferSize = bufferSize
        self._sampleRate = sampleRate
        self._maxTimePerBlockInSec = bufferSize / sampleRate
        # pygame expects the keyword 'devicename' (all lowercase)
        pygame.mixer.init(devicename=audioDevice.name,
                          frequency=sampleRate,
                          buffer=bufferSize)
        self._channel = pygame.mixer.find_channel()
        self.testWave = wave.open("./tests/resources/test.wav", "r")
        self.totalFrames = self.testWave.getnframes()
        self.allsamples = self.testWave.readframes(self.totalFrames)
        self._byteArray = bytearray(self._bufferSize * 2)
        # Offsets into allsamples are in bytes: two bytes per 16-bit mono frame
        self.numFrames = 0
        logger.info("Mixer initialized")

    def terminate(self):
        pygame.mixer.quit()

    def run(self):
        logger.debug("AudioOutputLoop started")
        self._isRunning = True
        while self._isRunning:
            startTime = perf_counter()
            self._lock.acquire(True)
            # do cool stuff here
            if self.numFrames + self._bufferSize > self.totalFrames:
                self._isRunning = False
            else:
                startFrame = self.numFrames
                endFrame = startFrame + self._bufferSize * 2
                bytesOut = bytes(self.allsamples[startFrame:endFrame])
                sound = pygame.mixer.Sound(bytesOut)
                self._channel.play(sound)
                self.numFrames += self._bufferSize * 2
            stopTime = perf_counter()
            timeInBlockInSec = stopTime - startTime
            sleepTime = self._maxTimePerBlockInSec - timeInBlockInSec
            if sleepTime < 0:
                logger.warning("Audio dropout!")
            else:
                logger.info("CPU: %f",
                            100 * timeInBlockInSec / self._maxTimePerBlockInSec)
                sleep(sleepTime)
            self._lock.release()
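# --- Usage sketch (added; not part of the original source). AudioOutputLoop
# needs real audio hardware and pygame, so this is a hedged, comment-only
# sketch; `audio_device` is assumed to expose a `.name` that pygame can open.
#
#     loop = AudioOutputLoop(audio_device, sampleRate=44100, bufferSize=512)
#     loop.start()        # run() streams ./tests/resources/test.wav in blocks
#     loop.join()         # returns once the file has been fully queued
#     loop.terminate()    # shuts the pygame mixer down
#
# Note the per-block time budget is bufferSize / sampleRate, i.e.
# 512 / 44100 ~= 11.6 ms; exceeding it triggers the "Audio dropout!" warning.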
def convert_worker(
        acoustic_converter: AcousticConverter,
        super_resolution: SuperResolution,
        time_length: float,
        extra_time: float,
        input_silent_threshold: float,
        queue_input: Queue,
        queue_output: Queue,
        acquired_lock: Lock,
):
    logger = logging.getLogger('convert')
    init_logger(logger)
    logger.info('convert worker')

    chainer.global_config.enable_backprop = False
    chainer.global_config.train = False

    stream = ConvertStream(voice_changer=VoiceChanger(
        super_resolution=super_resolution,
        acoustic_converter=acoustic_converter,
        threshold=input_silent_threshold,
    ))
    stream_wrapper = StreamWrapper(stream=stream, extra_time=extra_time)

    acquired_lock.release()
    start_time = extra_time
    while True:
        item: Item = queue_input.get()
        start = time.time()
        in_feature: AcousticFeatureWrapper = item.item

        stream.add(start_time=start_time, data=in_feature)
        start_time += time_length

        out_feature = stream_wrapper.process_next(time_length=time_length)
        item.item = out_feature
        queue_output.put(item)
        logger.debug(f'{item.index}: {time.time() - start}')
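# --- Wiring sketch (added; not part of the original source). One hedged way the
# three workers above (encode -> convert -> decode) could be chained with
# multiprocessing queues. Each worker gets its own pre-acquired lock and
# releases it once initialised, so the caller can block until the whole
# pipeline is ready. All numeric values below are illustrative assumptions.
from multiprocessing import Lock, Process, Queue


def start_pipeline(realtime_vocoder, acoustic_converter, super_resolution):
    time_length, extra_time = 1.0, 0.5
    q_wave_in, q_feature, q_converted, q_wave_out = (
        Queue(), Queue(), Queue(), Queue())

    locks = [Lock() for _ in range(3)]
    for lock in locks:
        lock.acquire()  # released by the worker when its stream is ready

    Process(target=encode_worker, kwargs=dict(
        realtime_vocoder=realtime_vocoder, time_length=time_length,
        extra_time=extra_time, queue_input=q_wave_in,
        queue_output=q_feature, acquired_lock=locks[0])).start()
    Process(target=convert_worker, kwargs=dict(
        acoustic_converter=acoustic_converter,
        super_resolution=super_resolution, time_length=time_length,
        extra_time=extra_time, input_silent_threshold=-80.0,
        queue_input=q_feature, queue_output=q_converted,
        acquired_lock=locks[1])).start()
    Process(target=decode_worker, kwargs=dict(
        realtime_vocoder=realtime_vocoder, time_length=time_length,
        extra_time=extra_time, vocoder_buffer_size=1024,
        out_audio_chunk=1024, output_silent_threshold=80.0,
        queue_input=q_converted, queue_output=q_wave_out,
        acquired_lock=locks[2])).start()

    for lock in locks:
        lock.acquire()  # block until every worker has signalled readiness
    return q_wave_in, q_wave_out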
class MidiEventLoop(Thread):
    def __init__(self, delegate, pollIntervalInMs=25):
        Thread.__init__(self, name="MidiEventLoop")
        self._lock = Lock()
        self._isRunning = False
        self.delegate = delegate
        self._pollInterval = pollIntervalInMs / 1000
        self.devices = None
        self.midiMappings = MidiMappingTable(
            PathFinder.findUserFile('midimappings.sql'))

    def quit(self):
        self._lock.acquire(True)
        if self.devices is not None:
            self.devices.closeAll()
        self._isRunning = False
        self._lock.release()
        logger.info("Closed all MIDI devices")

    def run(self):
        # Initialization is done here so as not to block the main thread
        self.devices = MidiDeviceList()
        self.devices.openAll()
        logger.debug("MidiEventLoop started")
        self._isRunning = True
        while self._isRunning:
            self._lock.acquire(True)
            for device in self.devices.openedInputs():
                while device.poll():
                    self._parseEvent(device.readEvents())
            self._lock.release()
            sleep(self._pollInterval)
        pygame.midi.quit()

    def _parseEvent(self, eventList):
        eventData = eventList[0][0]
        midiEvent = MidiEvent(eventData[0], eventData[1], eventData[2],
                              eventList[0][1])
        self.midiMappings.process(midiEvent, self.delegate)
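# --- Usage sketch (added; not part of the original source). Assumed lifecycle
# for MidiEventLoop; `delegate` is whatever object MidiMappingTable.process()
# dispatches mapped events to, so it is left abstract here.
#
#     midi_loop = MidiEventLoop(delegate, pollIntervalInMs=25)
#     midi_loop.start()   # opens every MIDI input, then polls on its own thread
#     ...
#     midi_loop.quit()    # closes devices under the lock and stops the loop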
class Queue(object):

    def __init__(self, maxsize=0):
        if maxsize <= 0:
            maxsize = _multiprocessing.SemLock.SEM_VALUE_MAX
        self._maxsize = maxsize
        self._reader, self._writer = Pipe(duplex=False)
        self._rlock = Lock()
        self._opid = os.getpid()
        if sys.platform == 'win32':
            self._wlock = None
        else:
            self._wlock = Lock()
        self._sem = BoundedSemaphore(maxsize)

        self._after_fork()

        if sys.platform != 'win32':
            register_after_fork(self, Queue._after_fork)

    def __getstate__(self):
        assert_spawning(self)
        return (self._maxsize, self._reader, self._writer,
                self._rlock, self._wlock, self._sem, self._opid)

    def __setstate__(self, state):
        (self._maxsize, self._reader, self._writer,
         self._rlock, self._wlock, self._sem, self._opid) = state
        self._after_fork()

    def _after_fork(self):
        debug('Queue._after_fork()')
        self._notempty = threading.Condition(threading.Lock())
        self._buffer = collections.deque()
        self._thread = None
        self._jointhread = None
        self._joincancelled = False
        self._closed = False
        self._close = None
        self._send = self._writer.send
        self._recv = self._reader.recv
        self._poll = self._reader.poll

    def put(self, obj, block=True, timeout=None):
        assert not self._closed
        if not self._sem.acquire(block, timeout):
            raise Full

        self._notempty.acquire()
        try:
            if self._thread is None:
                self._start_thread()
            self._buffer.append(obj)
            self._notempty.notify()
        finally:
            self._notempty.release()

    def get(self, block=True, timeout=None):
        if block and timeout is None:
            self._rlock.acquire()
            try:
                res = self._recv()
                self._sem.release()
                return res
            finally:
                self._rlock.release()
        else:
            if block:
                deadline = time.time() + timeout
            if not self._rlock.acquire(block, timeout):
                raise Empty
            try:
                if not self._poll(block and (deadline - time.time()) or 0.0):
                    raise Empty
                res = self._recv()
                self._sem.release()
                return res
            finally:
                self._rlock.release()

    def qsize(self):
        # Raises NotImplementedError on Mac OSX because of broken sem_getvalue()
        return self._maxsize - self._sem._semlock._get_value()

    def empty(self):
        return not self._poll()

    def full(self):
        return self._sem._semlock._is_zero()

    def get_nowait(self):
        return self.get(False)

    def put_nowait(self, obj):
        return self.put(obj, False)

    def close(self):
        self._closed = True
        self._reader.close()
        if self._close:
            self._close()

    def join_thread(self):
        debug('Queue.join_thread()')
        assert self._closed
        if self._jointhread:
            self._jointhread()

    def cancel_join_thread(self):
        debug('Queue.cancel_join_thread()')
        self._joincancelled = True
        try:
            self._jointhread.cancel()
        except AttributeError:
            pass

    def _start_thread(self):
        debug('Queue._start_thread()')

        # Start thread which transfers data from buffer to pipe
        self._buffer.clear()
        self._thread = threading.Thread(
            target=Queue._feed,
            args=(self._buffer, self._notempty, self._send,
                  self._wlock, self._writer.close),
            name='QueueFeederThread'
        )
        self._thread.daemon = True

        debug('doing self._thread.start()')
        self._thread.start()
        debug('... done self._thread.start()')

        # On process exit we will wait for data to be flushed to pipe.
        #
        # However, if this process created the queue then all
        # processes which use the queue will be descendants of this
        # process.  Therefore waiting for the queue to be flushed
        # is pointless once all the child processes have been joined.
        created_by_this_process = (self._opid == os.getpid())
        if not self._joincancelled and not created_by_this_process:
            self._jointhread = Finalize(
                self._thread, Queue._finalize_join,
                [weakref.ref(self._thread)],
                exitpriority=-5
            )

        # Send sentinel to the thread queue object when garbage collected
        self._close = Finalize(
            self, Queue._finalize_close,
            [self._buffer, self._notempty],
            exitpriority=10
        )

    @staticmethod
    def _finalize_join(twr):
        debug('joining queue thread')
        thread = twr()
        if thread is not None:
            thread.join()
            debug('... queue thread joined')
        else:
            debug('... queue thread already dead')

    @staticmethod
    def _finalize_close(buffer, notempty):
        debug('telling queue thread to quit')
        notempty.acquire()
        try:
            buffer.append(_sentinel)
            notempty.notify()
        finally:
            notempty.release()

    @staticmethod
    def _feed(buffer, notempty, send, writelock, close):
        debug('starting thread to feed data to pipe')
        from .util import is_exiting

        nacquire = notempty.acquire
        nrelease = notempty.release
        nwait = notempty.wait
        bpopleft = buffer.popleft
        sentinel = _sentinel
        if sys.platform != 'win32':
            wacquire = writelock.acquire
            wrelease = writelock.release
        else:
            wacquire = None

        try:
            while 1:
                nacquire()
                try:
                    if not buffer:
                        nwait()
                finally:
                    nrelease()
                try:
                    while 1:
                        obj = bpopleft()
                        if obj is sentinel:
                            debug('feeder thread got sentinel -- exiting')
                            close()
                            return
                        if wacquire is None:
                            send(obj)
                        else:
                            wacquire()
                            try:
                                send(obj)
                            finally:
                                wrelease()
                except IndexError:
                    pass
        except Exception as e:
            # Since this runs in a daemon thread the resources it uses
            # may become unusable while the process is cleaning up.
            # We ignore errors which happen after the process has
            # started to cleanup.
            try:
                if is_exiting():
                    info('error in queue thread: %s', e)
                else:
                    import traceback
                    traceback.print_exc()
            except Exception:
                pass
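# --- Usage sketch (added; not part of the original source). The class above is
# the stdlib multiprocessing Queue implementation; application code would
# normally obtain it from the multiprocessing package rather than instantiate
# this internal module directly.
from multiprocessing import Process, Queue


def _producer(q):
    for n in range(3):
        q.put(n)
    q.put(None)  # sentinel telling the consumer to stop


if __name__ == '__main__':
    q = Queue()
    Process(target=_producer, args=(q,)).start()
    while True:
        item = q.get()
        if item is None:
            break
        print('got', item)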
def process(self,
            artifact: Artifact,
            discovery: Dict[str, DiscoveryState],
            objects: List[Artifact],
            lock: LockT) -> List[Artifact]:
    new_artifacts: List[Artifact] = []
    new_discovery: Dict[str, DiscoveryState] = {}
    new_objects: List[Artifact] = []

    # Identify tasks that are completely new
    if (artifact.state is New
            and artifact.filetype is Unknown):
        # Use the pathmap list to work out the
        # filetype and starting state
        new_artifact = None
        for pathmap in self._pathmaps:
            if artifact.location in pathmap:
                new_artifact = Artifact(artifact.location,
                                        pathmap.filetype,
                                        pathmap.state)
        # Assuming we found a match and were able
        # to create the artifact, return it so that
        # it can be added to the queue
        if new_artifact is not None:
            # Also store its hash in the file database
            file_info = FileInfoDatabase(self._database)
            file_info.add_file_info(artifact.location,
                                    new_artifact.hash)
            new_artifacts.append(new_artifact)

    elif artifact.state is Analysed:
        # Work out whether this artifact needs to be
        # included in the build or not - if any of its
        # definitions are mentioned in the (shared)
        # discovery mapping, or if it is defining
        # the target of the build then it should be included
        #
        # TODO: Looping through a list of what could
        # eventually contain every unit/symbol in the build has
        # the potential to become an issue for performance.
        # Longer term we probably want to drop using the shared
        # discovery array in favour of database lookups
        required = False
        for definition in artifact.defines:
            # Is this the target?
            if (definition == self._target
                    or definition in discovery):
                required = True
                break

        if required:
            # Update the discovery list to indicate that
            # the definitions from this Artifact are present
            # (but not yet compiled)
            for definition in artifact.defines:
                if definition not in discovery:
                    new_discovery[definition] = DiscoveryState.SEEN

            # Now check whether the Artifact's dependencies
            # have already been seen and compiled
            compiled = [False] * len(artifact.depends_on)
            for idep, dependency in enumerate(artifact.depends_on):
                # Only applies to str dependencies
                if isinstance(dependency, Path):
                    continue
                if dependency in discovery:
                    # Are the dependencies compiled?
                    if discovery[dependency] == DiscoveryState.COMPILED:
                        compiled[idep] = True
                else:
                    # If the dependency isn't in the list at all yet
                    # then add an entry so the system knows we are
                    # expecting it later (for the above check)
                    new_discovery[dependency] = DiscoveryState.AWARE_OF

            # If the dependencies are satisfied (or there weren't
            # any) then this file can be compiled now
            if len(compiled) == 0 or all(compiled):
                for definition in artifact.defines:
                    task = self._taskmap[(artifact.filetype,
                                          artifact.state)]
                    new_artifacts.extend(task.run([artifact]))
                    new_discovery[definition] = DiscoveryState.COMPILED
            else:
                # If the dependencies weren't all satisfied then
                # back on the queue for another pass later
                new_artifacts.append(artifact)
        else:
            # If it wasn't required it could be later, so
            # put it back on the queue, unless the target
            # has been compiled, in which case it wasn't
            # needed at all!
            if (self._target not in discovery
                    or discovery[self._target] != DiscoveryState.COMPILED):
                new_artifacts.append(artifact)

    elif artifact.state is Compiled:
        # Begin populating the list for linking
        new_objects.append(artifact)
        # But do not return a new artifact - this object
        # is "done" as far as the processing is concerned
        #
        # But, if this is the file containing the target
        # that means everything must have been compiled
        # by this point; so we can do the linking step
        if self._target in artifact.defines:
            task = self._taskmap[(artifact.filetype,
                                  artifact.state)]
            new_artifacts.extend(task.run(objects + [artifact]))

    elif artifact.state is Linked:
        # Nothing to do at present with the final linked
        # executable, but included here for completeness
        pass

    else:
        # If the object specifies any paths in its dependencies
        # then these must exist before it can be processed
        # TODO: This needs more thorough logic and to come from
        # the database eventually
        ready = True
        for dependency in artifact.depends_on:
            if isinstance(dependency, Path):
                if not dependency.exists():
                    ready = False

        if ready:
            # An artifact with a filetype and state set
            # will have an appropriate task that should
            # be used to run it (though unlike the old
            # implementation this is probably returning
            # the instance of the Task not the class)
            if ((artifact.filetype, artifact.state)
                    in self._taskmap):
                task = self._taskmap[(artifact.filetype,
                                      artifact.state)]
                new_artifacts.extend(task.run([artifact]))
        else:
            new_artifacts.append(artifact)

    # Update shared arrays
    lock.acquire()
    objects.extend(new_objects)
    for key, value in new_discovery.items():
        discovery[key] = value
    lock.release()

    return new_artifacts
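# --- Pattern sketch (added; not part of the original source). A minimal,
# runnable illustration of the lock-guarded shared-state update that process()
# ends with, using multiprocessing.Manager proxies as assumed stand-ins for the
# shared `discovery` mapping and `objects` list.
from multiprocessing import Lock, Manager, Process


def _merge(discovery, objects, lock, new_discovery, new_objects):
    # `with lock:` is equivalent to the explicit acquire()/release() pair
    # above, but also releases the lock if one of the updates raises.
    with lock:
        objects.extend(new_objects)
        for key, value in new_discovery.items():
            discovery[key] = value


if __name__ == '__main__':
    manager = Manager()
    discovery, objects, lock = manager.dict(), manager.list(), Lock()
    p = Process(target=_merge,
                args=(discovery, objects, lock,
                      {'mod_a': 'seen'}, ['mod_a.o']))
    p.start()
    p.join()
    print(dict(discovery), list(objects))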
import datetime
import json
import logging
import os
import random
import time
from threading import Lock

import praw
import requests
from dateutil import parser

from daemon import Daemon  # assumed local module providing the Daemon base class


class PollsterBot(Daemon):
    def __init__(self, pid):
        Daemon.__init__(self, pid)
        # Reddit https://praw.readthedocs.io/en/stable/pages/comment_parsing.html
        self.reddit = {}
        self.default_subs = 'pollster_bot'
        self.bot_name = 'pollster_bot'
        self.version = '1.0'
        self.touched_comment_ids = []

        # create logger
        self.logger = logging.getLogger('Pollster_Bot')
        self.logger.setLevel(logging.INFO)
        # File handler set to DEBUG
        fh = logging.FileHandler(filename=os.path.join(
            os.path.dirname(__file__), 'PollsterBotLog.txt'))
        fh.setLevel(logging.DEBUG)
        # create console handler and set level to debug
        ch = logging.StreamHandler()
        ch.setLevel(logging.DEBUG)
        # create formatter
        formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        # add formatter to ch, fh
        ch.setFormatter(formatter)
        fh.setFormatter(formatter)
        # add ch, fh to logger
        self.logger.addHandler(ch)
        self.logger.addHandler(fh)

        self.lock = Lock()
        self.logger.info('Starting Pollster Bot ver. ' + self.version)

        # Huffington post http://elections.huffingtonpost.com/pollster/api
        self.uri = 'http://elections.huffingtonpost.com/pollster/api/charts.json'

        # Set states
        self.states = self.load_json_file('data/states.json')

        # phrases
        phrases = self.load_json_file('data/phrases.json')
        self.greetings = phrases['greeting']
        self.winning = phrases['winning']
        self.losing = phrases['losing']

        # keywords to call the pollster bot
        self.keywords = self.load_json_file('data/keywords.json')['keywords']

        # subs
        subs = self.load_json_file('data/subs.json')['subs']
        for sub in subs:
            self.default_subs += '+' + sub

        self.log_in_credentials = self.load_json_file(
            'data/login_credentials.json')

    def login(self):
        self.logger.info('Login started Pollster Bot ver. ' + self.version)
        login_name = self.log_in_credentials['user']
        login_password = self.log_in_credentials['password']
        self.reddit = praw.Reddit(user_agent='Pollster')
        self.reddit.login(login_name, login_password, disable_warning=False)
        self.logger.info('Login Completed Pollster Bot ver. ' + self.version)

    # Returns the dictionary parsed from a bundled JSON data file.
    def load_json_file(self, filename):
        self.logger.info('Read {} Pollster Bot ver. {}'.format(
            filename, self.version))
        fn = os.path.join(os.path.dirname(__file__), filename)
        with open(fn) as data_file:
            return json.load(data_file)

    def get_greeting(self):
        return random.choice(self.greetings)

    def get_winning(self, winner, points):
        return random.choice(self.winning).format(winner, points)

    def get_losing(self, loser, points):
        return random.choice(self.losing).format(loser, points)

    # Gets all submissions in the subreddit as a generator.
    def get_submissions(self, subreddit, submission_limit=25):
        donald_submissions = self.reddit.get_subreddit(subreddit).get_hot(
            limit=submission_limit)
        return donald_submissions

    @staticmethod
    def get_comments(submission, comment_limit=25):
        submission.replace_more_comments(limit=comment_limit, threshold=0)
        return submission.comments

    def get_flat_comments(self, submission, comment_limit=25):
        try:
            submission.replace_more_comments(limit=comment_limit, threshold=0)
        except requests.exceptions.ConnectionError:
            self.logger.error('Error fetching comments!')
            return None
        return praw.helpers.flatten_tree(submission.comments)

    def get_recent_comments(self, subreddit):
        comments = self.reddit.get_comments(subreddit)
        return praw.helpers.flatten_tree(comments)

    def get_comments_with_helper(self, subreddit):
        comments = praw.helpers.comment_stream(self.reddit, subreddit)
        return praw.helpers.flatten_tree(comments)

    # Gets polls for a state, defaulting to the 2016 presidential race.
    def get_poll_huffington(self, state, page=1, topic='2016-president'):
        poll_params = {'page': page, 'state': state, 'topic': topic}
        my_response = requests.get(self.uri, params=poll_params)
        polling_data = []
        if my_response.ok:
            json_response = my_response.json()
            for entry in json_response:
                # Build a fresh dict per entry; reusing a single dict would
                # make every list element alias the same object.
                entry_data = {
                    'title': entry['title'],
                    'state': entry['state'],
                    'url': entry['url'],
                    'last_updated': entry['last_updated'],
                    'estimates': [],
                }
                for estimate in entry['estimates']:
                    entry_data['estimates'].append({
                        'choice': estimate['choice'],
                        'value': estimate['value'],
                        'party': estimate['party'],
                    })
                polling_data.append(entry_data)
        return polling_data

    # Returns a list of state abbreviations mentioned in the comment.
    @staticmethod
    def check_comment_for_dictionary_keys_and_values(comment, dictionary):
        comment_string = comment.body
        matches = []
        abbrevs = []
        # Check full names
        for x in dictionary.keys():
            if x in comment_string and x not in matches:
                matches.append(x)
        for x in dictionary.values():
            if x in comment_string and x not in matches:
                matches.append(x)
        # Return Abbrevs
        for match in matches:
            if match in dictionary.values():
                abbrevs.append(match)
            else:
                for key in dictionary.keys():
                    if key == match:
                        abbrevs.append(dictionary[key])
        return abbrevs

    @staticmethod
    def check_word_in_list_in_string(words, string):
        """
        Returns a non-empty list if a word in `words` is contained in `string`.

        :param words:
        :param string:
        :return: empty list if no element of `words` is contained in `string`
        """
        return [string for word in words if word in string]

    def header_huffington(self):
        """
        Builds a header for the huffington post output
        :return:
        """
        head = '\n ^^Polls ^^fetched ^^from ^^[http://elections.huffingtonpost.com/](http://elections.huffingtonpost.com/).\n\n'
        head += '***{}***\n\n'.format(self.get_greeting())
        head += '.\n\n'
        head += '.\n\n'
        return head

    def footer(self):
        """
        Builds the bot footer
        :return:
        """
        foot = '\n\nSummon pollster bot by typing in Pollster_Bot and then any state or states.\n\nEx. Pollster Bot CA Texas Maine RI visit [/r/Pollster_Bot](https://www.reddit.com/r/Pollster_Bot/) for more info'
        foot += "\n\n***If you have any feedback on this bot please [Click Here](http://i.imgur.com/YFIri5g.jpg).***"
        foot += '^^Pollster ^^bot ^^ver. ^^{}'.format(self.version)
        return foot

    @staticmethod
    def format_estimates(estimates):
        reply = ''
        reply += '\nChoice | Percentage | Party\n'
        reply += '------|----------|-----\n'
        for estimate in estimates:
            if not estimate['party']:
                estimate['party'] = ''
            reply += '{} | {} | {} \n'.format(estimate['choice'],
                                              str(estimate['value']),
                                              estimate['party'])
        return reply + '\n\n'

    def format_poll(self, poll):
        state = ''
        for name, abb in self.states.items():
            if poll['state'] == abb:
                state = name
        reply = ''
        reply += '\n\n***' + state + ' Poll:' + '***\n\n'
        reply += self.format_estimates(poll['estimates'])
        datetime_string = poll['last_updated']
        dt = parser.parse(datetime_string)
        datetime_string = dt.strftime('%b %d %Y %I:%M%p')
        reply += 'Date of poll: {} ^^GMT\n\n'.format(datetime_string)
        reply += r'^^Link ^^to ^^poll ^^' + str(poll['url'] + '\n\n')
        return reply

    def check_condition(self, comment):
        """
        Checks if we have a keyword in the comment, then if we have a list of
        states also.
        :param comment:
        :return: if we should act on the comment or not
        """
        if comment.id in self.touched_comment_ids:
            return False, None
        # First check for keywords in the comment; for now we don't care
        # about formatting after the keyword
        has_keyword = self.check_word_in_list_in_string(
            self.keywords, comment.body)
        if not has_keyword:
            return False, None
        # Next we check if we have states or abbreviations
        abbrevs = self.check_comment_for_dictionary_keys_and_values(
            comment, self.states)
        if len(abbrevs) < 1:
            return False, None
        if str(comment.author) == self.bot_name:
            return False, None
        for reply in comment.replies:
            if str(reply.author) == self.bot_name:
                return False, None
        return True, abbrevs

    def bot_action(self, comment, abbrevs):
        self.lock.acquire()
        response = self.header_huffington()
        done_reqs = []
        for abbrev in abbrevs:
            if abbrev not in done_reqs:
                done_reqs.append(abbrev)
                polls = self.get_poll_huffington(abbrev)
                for poll in polls:
                    response += self.format_poll(poll)
        response += self.footer()
        try:
            comment.reply(response)
            self.touched_comment_ids.append(comment.id)
            # log
            log_out = ('Time: {} \nAuthor: {} \nBody: {}\n'
                       'States: {} \nResponse: {} \n').format(
                datetime.datetime.now(), comment.author, comment.body,
                abbrevs, response)
            self.logger.info(log_out)
        except praw.errors.RateLimitExceeded:
            self.logger.warning("RateLimitExceeded!!! Response not posted!!!")
        finally:
            self.lock.release()

    def slow_loop(self):
        submissions = self.get_submissions(self.default_subs,
                                           submission_limit=100)
        for submission in submissions:
            self.logger.info(u'Crawling Submission ' + submission.title)
            time_start = time.time()
            # Fetch the submission's comment tree, flattened
            comments = self.get_flat_comments(submission)
            if comments is None:
                continue
            for comment in comments:
                check, abbrevs = self.check_condition(comment)
                if check:
                    self.bot_action(comment, abbrevs)
            time_end = time.time()
            crawl_time = int(time_end) - int(time_start)
            crawl_string = 'Crawl time: ' + str(crawl_time) + ' seconds'
            self.logger.info(crawl_string)

    def main_loop(self):
        for comment in self.get_recent_comments(self.default_subs):
            check, abbrevs = self.check_condition(comment)
            if check:
                self.bot_action(comment, abbrevs)
            time.sleep(2)

    def run_forever(self):
        self.logger.info('Forever Loop started Pollster Bot ver. '
                         + self.version)
        self.login()
        while True:
            self.main_loop()

    def run(self):
        self.logger.info('Running Pollster Bot ver. ' + self.version)
        self.run_forever()
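# --- Usage sketch (added; not part of the original source). PollsterBot
# subclasses Daemon, so a hedged entry point might look like this; the pid-file
# path is an illustrative assumption.
if __name__ == '__main__':
    bot = PollsterBot('/tmp/pollster_bot.pid')
    bot.run()  # logs in, then polls recent comments forever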