def __init__(self):
    self.mutex = Mutex()
    self.toggle_callbacks = []
    self.state = None
    self.led = Pin(13, Pin.OUT)  # active low
    self.relay = Pin(4, Pin.OUT)
    self.button = Pin(12, Pin.IN)
    self.off()

    def button_callback():
        print('*button pressed')
        self.toggle()

    self.set_button_callback(button_callback)
def run(self): "executes sort, accumulate, sort" try: openlog(self.name, LOG_NDELAY | LOG_CONS | LOG_PID, LOG_LOCAL0) q = self._queue _sync_pending = Mutex(q._sync_pending_path) _sync_pending.acquire() pQ = FIFO(q._pQ_path, q._cache_size) assert len(pQ) == 0 pQ_data = os.path.join(q._pQ_sync, "data") dumpQ_data = os.path.join(q._dumpQ_sync, "data") in_files = [ os.path.join(pQ_data, cache_file) for cache_file in os.listdir(pQ_data) ] in_files += [ os.path.join(dumpQ_data, cache_file) for cache_file in os.listdir(dumpQ_data) ] start = time() # fast version of chained generators map( pQ.put, q.sort( q.accumulate( q.mergefiles(in_files, (q._unique_key, ))), (q._priority_key, ))) # slow version of this generator chain: #merged_lines = [] #for x in q.mergefiles(in_files, q._unique_key): # syslog("out of mergefiles: %s" % repr(x)) # merged_lines.append(x) #accumulated_lines = [] #for x in q.accumulate(merged_lines): # syslog("out of accumulator: %s" % repr(x)) # accumulated_lines.append(x) #for x in q.sort(accumulated_lines, q._priority_key): # syslog("out of sort: %s" % repr(x)) # pQ.put(x) end = time() pQ.close() syslog(LOG_INFO, "merge took %.1f seconds" % (end - start)) shutil.rmtree(q._pQ_sync) shutil.rmtree(q._dumpQ_sync) _sync_pending.release() except Exception, exc: map(lambda line: syslog(LOG_NOTICE, line), traceback.format_exc(exc).splitlines())
def handle_text_message(event):
    text = event.message.text
    sourceId = getSourceId(event.source)
    matcher = re.match(r'^#(\d+) (.+)', text)
    if text == 'プラポ':  # "purapo" = planning poker keyword
        poker_mutex = Mutex(redis, POKER_MUTEX_KEY_PREFIX + sourceId)
        poker_mutex.lock()
        if poker_mutex.is_lock():
            number = str(redis.incr(sourceId)).encode('utf-8')
            line_bot_api.reply_message(event.reply_token,
                                       generate_planning_poker_message(number))
            time.sleep(POKER_MUTEX_TIMEOUT)
            if poker_mutex.is_lock():
                poker_mutex.unlock()
    elif matcher is not None:
        number = matcher.group(1)
        value = matcher.group(2)
        current = redis.get(sourceId).encode('utf-8')
        vote_key = sourceId + number
        status = redis.hget(vote_key, 'status')
        if status is None:
            if number != current:
                line_bot_api.reply_message(
                    event.reply_token,
                    TextMessage(text=MESSAGE_INVALID_VOTE.format(number)))
                return
            poker_mutex = Mutex(redis, POKER_MUTEX_KEY_PREFIX + sourceId)
            vote_mutex = Mutex(redis, VOTE_MUTEX_KEY_PREFIX + sourceId)
            location = mapping.keys()[mapping.values().index(value)]
            vote_mutex.lock()
            if vote_mutex.is_lock():
                time.sleep(VOTE_MUTEX_TIMEOUT)
                redis.hincrby(vote_key, location)
                line_bot_api.reply_message(
                    event.reply_token,
                    genenate_voting_result_message(vote_key))
                redis.hset(vote_key, 'status', 'complete')
                vote_mutex.unlock()
                poker_mutex.release()
            else:
                redis.hincrby(vote_key, location)
        else:
            line_bot_api.reply_message(
                event.reply_token,
                TextMessage(text=MESSAGE_END_POKER.format(number)))
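# The handlers in this file rely on a Redis-backed Mutex exposing
# lock()/is_lock()/unlock(); its implementation is not shown here.  The class
# below is only a minimal sketch of how such a lock is commonly built on
# SET NX EX with redis-py.  The class name RedisMutexSketch and the token
# scheme are assumptions for illustration, not the project's actual Mutex.

import uuid

class RedisMutexSketch(object):
    """Best-effort distributed lock sketch (assumes decode_responses=True)."""

    def __init__(self, redis_client, key, timeout=30):
        self._redis = redis_client
        self._key = key
        self._timeout = timeout
        self._token = None

    def lock(self):
        # SET key token NX EX timeout: succeeds only if the key is absent and
        # expires automatically, so a crashed holder cannot block forever.
        token = str(uuid.uuid4())
        if self._redis.set(self._key, token, nx=True, ex=self._timeout):
            self._token = token

    def is_lock(self):
        # True only while we still hold the key.
        return self._token is not None and self._redis.get(self._key) == self._token

    def unlock(self):
        # Note: get-then-delete is racy near expiry; acceptable for a sketch.
        if self.is_lock():
            self._redis.delete(self._key)
            self._token = None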
def main(argv):
    hlog.flags()
    random.seed(FLAGS.seed)
    np.random.seed(FLAGS.seed)
    torch.manual_seed(FLAGS.seed)
    #input_symbols_list = set(['red', 'yellow', 'green', 'blue', 'purple', 'pink', 'around', 'thrice', 'after'])
    input_symbols_list = set([
        'dax', 'lug', 'wif', 'zup', 'fep', 'blicket', 'kiki', 'tufa', 'gazzer'
    ])
    output_symbols_list = set(
        ['RED', 'YELLOW', 'GREEN', 'BLUE', 'PURPLE', 'PINK'])
    study, test = generate_fig2_exp(input_symbols_list, output_symbols_list)
    vocab_x = Vocab()
    vocab_y = Vocab()
    if FLAGS.full_data:
        for sym in input_symbols_list:
            vocab_x.add(sym)
        for sym in output_symbols_list:
            vocab_y.add(sym)
        max_len_x = 7
        max_len_y = 9
    else:
        test, study = study[3:4], study[0:3]
        for (x, y) in test + study:
            for sym in x:
                vocab_x.add(sym)
            for sym in y:
                vocab_y.add(sym)
        max_len_x = 2
        max_len_y = 2
    hlog.value("vocab_x", vocab_x)
    hlog.value("vocab_y", vocab_y)
    hlog.value("study", study)
    hlog.value("test", test)
    train_items, test_items = encode(study, vocab_x, vocab_y), encode(test, vocab_x, vocab_y)
    # outlist = list(output_symbols_list)
    oracle_py = Oracle(train_items, test_items, DEVICE, dist="py",
                       vocab_x=vocab_x, vocab_y=vocab_y)
    oracle_px = Oracle(train_items, test_items, DEVICE, dist="px",
                       vocab_x=vocab_x, vocab_y=vocab_y)
    oracle_qxy = Oracle(train_items, test_items, DEVICE, dist="qxy",
                        vocab_x=vocab_x, vocab_y=vocab_y)
    model = Mutex(
        vocab_x,
        vocab_y,
        FLAGS.dim,
        FLAGS.dim,
        oracle_py,
        max_len_x=max_len_x,
        max_len_y=max_len_y,
        copy=False,
        n_layers=FLAGS.n_layers,
        self_att=False,
        dropout=FLAGS.dropout,
        lamda=FLAGS.lamda,
        kl_lamda=FLAGS.kl_lamda,
        Nsample=FLAGS.Nsample,
        temp=FLAGS.temp,
        regularize=FLAGS.regularize,
        ent=FLAGS.ent,
    ).to(DEVICE)

    if FLAGS.regularize and not isinstance(model.px, Oracle):
        with hlog.task("pretrain px"):
            pretrain(model.px, train_items + test_items, test_items)
        for p in model.px.parameters():
            p.requires_grad = False

    with hlog.task("Initial Samples"):
        hlog.value("px samples", "\n".join(model.sample_px(20)))
        hlog.value("py samples", "\n".join(model.sample_py(20)))
        hlog.value("qxy debug samples", "\n".join(model.sample_qxy_debug(N=20)))
        hlog.value(
            "qxy debug data",
            "\n".join(model.sample_qxy_debug_data(train_items + test_items)))
        # hlog.value("qxy samples", "\n".join(model.sample_qxy(model.py.sample(20,max_len),temp=model.temp)))
        # hlog.value("qxy samples (gumbel)", "\n".join(model.sample_qxy_gumbel(model.py.sample(20,max_len),temp=model.temp)))

    # if not isinstance(model.qxy,Oracle):
    #     train(model.qxy, swap_io(train_items) + swap_io(test_items), swap_io(test_items))

    # if not isinstance(model.pyx,Oracle):
    #     train(model.pyx, train_items + test_items, test_items)
    #     for param in model.pyx.parameters():
    #         param.requires_grad = False

    with hlog.task("train model"):
        acc, f1 = train(model, train_items, test_items)

    with hlog.task("Final Samples"):
        hlog.value("px samples", "\n".join(model.sample_px(20)))
        hlog.value("py samples", "\n".join(model.sample_py(20)))
        hlog.value("qxy debug samples", "\n".join(model.sample_qxy_debug(N=20)))
        hlog.value(
            "qxy debug data",
            "\n".join(model.sample_qxy_debug_data(train_items + test_items)))
        hlog.value(
            "qxy samples (gumbel)",
            "\n".join(
                model.sample_qxy_gumbel(model.py.sample(20, max_len_y),
                                        temp=model.temp)))
        #hlog.value("qxy samples", "\n".join(model.sample_qxy(model.py.sample(20,max_len),temp=model.temp)))

    if FLAGS.regularize:
        losses = pd.DataFrame(model.loss_container)
        figure = sns.lineplot(data=losses, dashes=False).figure
        figure.savefig(f"{FLAGS.seed}_plot.png")

    with hlog.task("train evaluation"):
        validate(model, train_items, vis=True)

    with hlog.task("test evaluation"):
        validate(model, test_items, vis=True)
def communication_loop(self):
    while True:
        msg = self.communicationManager.recv_message()  # blocking
        if msg is None:
            break

        if msg.type == "REQUEST":  # requesting CS access
            mux = Mutex.get_mutex(msg.referenceId)
            if mux is not None:
                mux.operationMutex.acquire()
                if mux.requesting:
                    # if REQUEST has earlier time then send AGREE
                    if mux.requestClock < msg.clock:
                        # add to queue
                        mux.heldUpRequests.append(msg.senderId)
                    else:
                        if mux.requestClock == msg.clock and self.communicationManager.processId < msg.senderId:
                            mux.heldUpRequests.append(msg.senderId)
                        else:
                            agreeReply = Message()
                            agreeReply.referenceId = msg.referenceId
                            agreeReply.recipientId = msg.senderId
                            if mux.keepAlive:
                                agreeReply.type = "RETURN"
                                if mux.get_data_size() > 0:
                                    agreeReply.hasData = True
                            else:
                                agreeReply.type = "AGREE"
                            self.communicationManager.send_message(agreeReply)
                else:
                    if not mux.locked:
                        agreeReply = Message()
                        agreeReply.referenceId = msg.referenceId
                        agreeReply.recipientId = msg.senderId
                        if mux.keepAlive:
                            agreeReply.type = "RETURN"
                        else:
                            agreeReply.type = "AGREE"
                        self.communicationManager.send_message(agreeReply)
                    else:
                        mux.heldUpRequests.append(msg.senderId)
                mux.operationMutex.release()

        elif msg.type == "RETURN":  # when leaving CS
            mux = Mutex.get_mutex(msg.referenceId)
            if mux is not None:
                mux.operationMutex.acquire()
                mux.agreeVector[msg.senderId] = True
                mux.keepAlive = False
                if mux.previousReturn is not None:
                    del mux.previousReturn
                mux.previousReturn = msg
                mux.operationMutex.release()
                self.enter_critical_section(mux)

        elif msg.type == "AGREE":  # process agrees to request for CS
            mux = Mutex.get_mutex(msg.referenceId)
            mux.operationMutex.acquire()
            if mux is not None and mux.requesting:
                mux.agreeVector[msg.senderId] = True  # sender agreed
                mux.keepAlive = False  # sb tries to acquire CS
                mux.operationMutex.release()
                self.enter_critical_section(mux)  # try to enter CS
            else:
                mux.operationMutex.release()

        elif msg.type == "WAIT":  # process is waiting
            cv = ConditionVariable.get_condition_variable(msg.referenceId)
            cv.conditionVariable.acquire()
            cv.waitingProcesses.append(msg.senderId)
            cv.conditionVariable.release()

        elif msg.type == "WAIT_RETURN":  # process is not waiting anymore
            cv = ConditionVariable.get_condition_variable(msg.referenceId)
            cv.conditionVariable.acquire()
            cv.waitingProcesses.remove(msg.senderId)
            cv.conditionVariable.release()

        elif msg.type == "SIGNAL":  # wake up waiting process
            cv = ConditionVariable.get_condition_variable(msg.referenceId)
            cv.conditionVariable.acquire()
            cv.waiting = False
            cv.conditionVariable.notify()
            cv.conditionVariable.release()

        elif msg.type == "QUIT":  # process will no longer communicate
            self.quitMessages += 1
            if self.quitMessages == self.communicationManager.processCount:
                self.communicationManager.initialized = False
                return
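# communication_loop above is the replier side of a Ricart-Agrawala style
# mutual-exclusion protocol: a REQUEST carries a logical clock, ties are broken
# by process id, and a process enters the critical section only after every
# peer has answered with AGREE (or RETURN).  The requesting side is not shown
# in this file; the function below is only a self-contained sketch of that
# half, using plain dicts and hypothetical broadcast/recv callables rather
# than this project's Message and CommunicationManager API.

def request_critical_section_sketch(process_id, peers, clock, broadcast, recv):
    """Sketch: ask every peer for the CS and wait until all of them agree."""
    clock += 1
    request_clock = clock
    broadcast({"type": "REQUEST", "senderId": process_id, "clock": request_clock})
    agreed = set()
    while len(agreed) < len(peers):
        msg = recv()  # blocking, analogous to recv_message() above
        if msg["type"] in ("AGREE", "RETURN"):
            agreed.add(msg["senderId"])
    # every peer agreed: the caller may now enter the critical section
    return request_clock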
class BatchPriorityQueue(MergingRecordFactory):
    """
    A subclass of RecordFactory that maintains two on-disk FIFOs of records:

      * _pQ is ordered by records' priority_field, lowest first.
        get(max_priority) retrieves the next record from this queue if its
        priority is lower than max_priority.

      * _dumpQ is periodically sorted by unique_field, merged by the
        accumulate function, and sorted again by priority_field in order to
        populate the priority queue.  put() adds records to the dump queue.

    Calling get() also puts a copy of the record into the dump queue, so
    during the merge, the accumulate function will see the previous record
    and any new records.  For this reason, records put back into the queue
    after processing a record gotten from the queue should present *only
    changes* relative to the gotten record, so the accumulate function can
    simply add them.
    """
    def __init__(self, record_class, template, data_path, unique_key,
                 priority_key, defaults={}, delimiter="|", cache_size=2**16):
        """
        See RecordFactory for record_class, 'template', 'defaults', and
        'delimiter'.  'data_path' and 'cache_size' are for _pQ and _dumpQ.

        unique_key and priority_key are integer indexes into 'fields'
        indicating which attributes of records to use as unique keys and
        priorities.
        """
        RecordFactory.__init__(self, record_class, template, defaults, delimiter)
        self._unique_key = unique_key
        self._priority_key = priority_key
        self._cache_size = cache_size
        self._pQ_path = os.path.join(data_path, "pQ")
        self._pQ_sync = os.path.join(data_path, "pQ_sync")
        self._pQ = None
        self._dumpQ_path = os.path.join(data_path, "dumpQ")
        self._dumpQ_sync = os.path.join(data_path, "dumpQ_sync")
        self._dumpQ = FIFO(self._dumpQ_path, self._cache_size)
        # the next deserialized value to return via get
        self._next = None
        self._sync_pending_path = os.path.join(data_path, "sync_pending")
        self._sync_pending = Mutex(self._sync_pending_path)
        self._lock = Mutex(os.path.join(data_path, "lock_file"))

    def close(self):
        """
        Acquires lock, then raises self.Syncing if a sync is in progress,
        otherwise closes internal FIFOs.
        """
        self._lock.acquire()
        try:
            if not self._sync_pending.available():
                raise self.Syncing
            if self._pQ:
                self._pQ.close()
            self._dumpQ.close()
        finally:
            self._lock.release()

    class NotYet(Exception):
        "next record excluded by max_priority"
        pass

    class Blocked(Exception):
        "another process has the mutex"
        pass

    class Syncing(Exception):
        "sync in progress"
        pass

    class ReadyToSync(Exception):
        "pQ empty but records exist in dumpQ"
        pass

    def get(self, max_priority=None, block=True):
        """
        If block=False and cannot acquire lock, raises self.Blocked.
        If a sync is in progress, raises self.Syncing.
        If both _pQ and _dumpQ are empty, raises Queue.Empty.
        If _pQ is empty but _dumpQ is not, raises self.ReadyToSync.
        If the next item in _pQ has a priority less than max_priority, pops
        it from the queue and returns the record.
        """
        acquired = self._lock.acquire(block)
        if not acquired:
            raise self.Blocked
        try:
            if not self._sync_pending.available():
                raise self.Syncing
            if self._pQ is None:
                self._pQ = FIFO(self._pQ_path, self._cache_size)
            if self._next is None:
                # instantiate next record without removing from pQ, raises
                # Queue.Empty when no lines in FIFO
                try:
                    line = self._pQ.next()
                    self._next = self.loads(line)
                except Queue.Empty:
                    if len(self._dumpQ) == 0:
                        raise Queue.Empty
                    else:
                        raise self.ReadyToSync
            if max_priority is None or \
                    self._next[self._priority_key] < max_priority:
                # Remove this line from _pQ and put into _dumpQ.  There
                # should be no risk of this raising Queue.Empty.
                self._dumpQ.put(self._pQ.get())
                ret_next = self._next
                self._next = None
                # This is the only place that get() returns:
                return ret_next
            elif max_priority is not None:
                raise self.NotYet
            else:
                raise Exception("Should never get here.")
        finally:
            self._lock.release()

    def put(self, record=None, values=None, attrs=None, block=True):
        """
        If record=None, then values or attrs is passed to self.create() to
        obtain a record.  record is put into _dumpQ.

        If block=False and cannot acquire lock, raises self.Blocked.
        """
        acquired = self._lock.acquire(block)
        if not acquired:
            raise self.Blocked
        if record is None:
            if values is not None:
                record = self.create(*values)
            elif attrs is not None:
                record = self.create(**attrs)
            else:
                raise Exception("put without record, values, or attrs")
        self._dumpQ.put(self.dumps(record))
        self._lock.release()

    def sync(self, block=True):
        """
        Removes all records from _dumpQ and _pQ and performs sort on
        unique_key, accumulate, sort on priority_key before putting all
        records into _pQ.

        If block=False and cannot acquire lock, raises self.Blocked.
        """
        acquired = self._lock.acquire(block)
        if not acquired:
            raise self.Blocked
        try:
            acquired = self._sync_pending.acquire(block=False)
            if not acquired:
                raise self.Syncing
            # move queues to the side
            if self._pQ:
                self._pQ.close()
            self._dumpQ.close()
            os.rename(self._pQ_path, self._pQ_sync)
            os.rename(self._dumpQ_path, self._dumpQ_sync)
            # set pQ to None while syncing, and reopen dumpQ
            self._pQ = None
            self._dumpQ = FIFO(self._dumpQ_path, self._cache_size)
            # Release sync lock momentarily, so merger can acquire it.
            # get() is blocked by _lock, so it won't get confused.
            self._sync_pending.release()
            # launch a child to sort, accumulate, sort
            merger = self.start_merger()
            # loop until merger acquires _sync_pending
            while merger.is_alive() and self._sync_pending.available():
                sleep(0.1)
            # now get back to normal operation
            return merger
        finally:
            self._lock.release()

    def start_merger(self):
        """
        defines, instantiates, and starts a multiprocessing.Process for
        sorting, accumulating, and sorting _dumpQ into a new _pQ
        """
        class Merger(multiprocessing.Process):
            "manages the sort, accumulate, sort"
            name = "SortAccumulateSort"
            _queue = self

            def run(self):
                "executes sort, accumulate, sort"
                try:
                    openlog(self.name, LOG_NDELAY | LOG_CONS | LOG_PID,
                            LOG_LOCAL0)
                    q = self._queue
                    _sync_pending = Mutex(q._sync_pending_path)
                    _sync_pending.acquire()
                    pQ = FIFO(q._pQ_path, q._cache_size)
                    assert len(pQ) == 0
                    pQ_data = os.path.join(q._pQ_sync, "data")
                    dumpQ_data = os.path.join(q._dumpQ_sync, "data")
                    in_files = [
                        os.path.join(pQ_data, cache_file)
                        for cache_file in os.listdir(pQ_data)
                    ]
                    in_files += [
                        os.path.join(dumpQ_data, cache_file)
                        for cache_file in os.listdir(dumpQ_data)
                    ]
                    start = time()
                    # fast version of chained generators
                    map(pQ.put,
                        q.sort(
                            q.accumulate(
                                q.mergefiles(in_files, (q._unique_key, ))),
                            (q._priority_key, )))
                    # slow version of this generator chain:
                    #merged_lines = []
                    #for x in q.mergefiles(in_files, q._unique_key):
                    #    syslog("out of mergefiles: %s" % repr(x))
                    #    merged_lines.append(x)
                    #accumulated_lines = []
                    #for x in q.accumulate(merged_lines):
                    #    syslog("out of accumulator: %s" % repr(x))
                    #    accumulated_lines.append(x)
                    #for x in q.sort(accumulated_lines, q._priority_key):
                    #    syslog("out of sort: %s" % repr(x))
                    #    pQ.put(x)
                    end = time()
                    pQ.close()
                    syslog(LOG_INFO, "merge took %.1f seconds" % (end - start))
                    shutil.rmtree(q._pQ_sync)
                    shutil.rmtree(q._dumpQ_sync)
                    _sync_pending.release()
                except Exception, exc:
                    map(lambda line: syslog(LOG_NOTICE, line),
                        traceback.format_exc(exc).splitlines())

        merger = Merger()
        merger.start()
        return merger
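# A minimal usage sketch for BatchPriorityQueue, based only on the docstrings
# above.  MyRecord, the template, and the example field values are made up for
# illustration; FIFO, Mutex, and MergingRecordFactory come from the
# surrounding package, so this outline is kept in comment form rather than
# presented as runnable code.
#
#   bpq = BatchPriorityQueue(MyRecord, template, "/tmp/bpq_data",
#                            unique_key=0, priority_key=1)
#   bpq.put(values=("http://example.com", 0.5))   # record goes into _dumpQ
#   try:
#       rec = bpq.get(max_priority=1.0)           # pops from _pQ if ready
#   except bpq.ReadyToSync:
#       merger = bpq.sync()                       # sort, accumulate, sort
#       merger.join()                             # wait for the child process
#   except (Queue.Empty, bpq.NotYet, bpq.Blocked, bpq.Syncing):
#       pass                                      # nothing to do right now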
def handle_text_message(event):
    text = event.message.text
    sourceId = getSourceId(event.source)
    matcher = re.match(r'^#(\d+) (.+)', text)
    if text == 'join':  # gather members
        number = str(redis.get('maxVoteKey')).encode('utf-8')
        join_mutex = Mutex(redis, JOIN_MUTEX_KEY_PREFIX + number)
        join_mutex.lock()
        redis.sadd(number, sourceId)
        redis.hset(number + '_member', redis.scard(number), sourceId)
        redis.hset(sourceId, 'current', number)
        if join_mutex.is_lock():
            time.sleep(JOIN_MUTEX_TIMEOUT)
            push_all(
                number,
                TextSendMessage(text='投票No.' + str(number) + ' (参加者' +
                                str(redis.scard(number)) + '人)の投票板です\uD83D\uDE04\n' +
                                '5秒間投票をスタートするなら 投票開始≫ ボタンを押してね\uD83D\uDE03'))
            push_all(number, generate_planning_poker_message(number))
            join_mutex.unlock()
            redis.incr('maxVoteKey')
    elif text == 'add':
        current = redis.hget(sourceId, 'current')
        if current != '-':
            remove_member(current, sourceId)
        line_bot_api.push_message(
            sourceId,
            TextSendMessage(text='参加したい投票No.を入力してください\uD83D\uDE03'))
        redis.hset(sourceId, 'status', 'number_wait')
    elif matcher is not None:
        number = matcher.group(1)
        value = matcher.group(2)
        current = redis.hget(sourceId, 'current').encode('utf-8')
        if current != number:
            line_bot_api.push_message(
                sourceId,
                TextSendMessage(text='投票板が古かった?もう一度お願いします!'))
            line_bot_api.push_message(sourceId,
                                      generate_planning_poker_message(current))
            return
        if value == '11':  # leave the board
            resign_operation(number, sourceId)
            return
        status = redis.hget('status_' + number, 'status')
        if status is None:
            vote_mutex = Mutex(redis, VOTE_MUTEX_KEY_PREFIX + number)
            if value == '0':  # start voting
                vote_mutex.lock()
                if vote_mutex.is_lock():
                    push_all(
                        number,
                        TextSendMessage(
                            text='5秒間投票をはじめます\uD83D\uDD52名前をタップして投票どうぞ\u2755'))
                    redis.hset('status_' + number, 'status', 'inprogress')
                    time.sleep(2)
                    push_all(number, TextSendMessage(text='あと3秒!'))
                    time.sleep(3)
                    push_all(
                        number,
                        TextSendMessage(text='-\uD83D\uDD52投票終了\uD83D\uDD52-'))
                    vote_mutex.unlock()
                    redis.delete('status_' + number)
                    member_list = redis.smembers(number)
                    for memberid in member_list:
                        redis.hset(memberid, 'voted', 'N')
                    push_result_message(number)
                    # clear the results after they are announced
                    redis.delete('res_' + number)
                    refresh_board(number)
                    return
            else:
                line_bot_api.push_message(
                    sourceId,
                    TextSendMessage(text='投票開始ボタンがまだ押されていないようです\uD83D\uDCA6'))
        else:
            if redis.hget(sourceId, 'voted') == 'Y':
                line_bot_api.push_message(
                    sourceId,
                    TextSendMessage(text='すでに投票済です・・結果集計まで待ってね\uD83D\uDE04'))
                return
            elif value == '0':
                line_bot_api.push_message(
                    sourceId,
                    TextSendMessage(text='もうはじまってるよ、誰かに投票して!\uD83D\uDE04'))
            elif value == '11':
                resign_operation(number, sourceId)
            else:
                # validation of out-of-range values omitted
                redis.hincrby('res_' + number, value)
                redis.hset(sourceId, 'voted', 'Y')
    else:
        current = redis.hget(sourceId, 'current')
        if current is not None and current != '-':
            display_name = getUtfName(line_bot_api.get_profile(sourceId))
            push_all_except_me(current, sourceId,
                               TextSendMessage(text=display_name + ':' + text))
        elif redis.hget(sourceId, 'status') == 'number_wait':
            if text == '0':
                redis.hdel(sourceId, 'status')
                line_bot_api.push_message(
                    sourceId,
                    TextSendMessage(
                        text='始めるときは参加!ボタンをみんなと一緒に押してね\uD83D\uDE04'))
                line_bot_api.push_message(sourceId, generateJoinButton())
            elif redis.exists(text) == 1:
                redis.hdel(sourceId, 'status')
                redis.sadd(text, sourceId)
                redis.hset(sourceId, 'current', text)
                if redis.hget('status_' + text, 'status') is None:
                    redis.hset(text + '_member', redis.scard(text), sourceId)
                    push_all(text,
                             TextSendMessage(text='メンバーが増えたので再度投票板を表示します'))
                    push_all(
                        text,
                        TextSendMessage(
                            text='投票No.' + str(text) + ' (参加者' +
                            str(redis.scard(text)) + '人)の投票板です\uD83D\uDE04\n' +
                            '5秒間投票をスタートするなら 投票開始≫ ボタンを押してね\uD83D\uDE03'))
                    push_all(text, generate_planning_poker_message(text))
            else:
                line_bot_api.push_message(
                    sourceId,
                    TextSendMessage(
                        text='見つからないです・・参加したい投票No.を再入力してね\uD83D\uDE22(初期画面に戻るなら 0 )'))
# hello.py
# from mpi4py import MPI
# comm = MPI.COMM_WORLD
# rank = comm.Get_rank()
# size = comm.Get_size()
# print "hello world from process ", rank+1, " of ", size

from mutex import Mutex
from conditionvariable import ConditionVariable
from monitor import Monitor
import time

m = Mutex(1)
cv = ConditionVariable(1)
monitor = Monitor()  # new threads are created in the monitor

if monitor.communicationManager.processId > 0:
    # these processes simulate waiting
    monitor.lock(m)
    monitor.wait(cv, m)
    monitor.log("INFO", "Stopped waiting, going to sleep")
    time.sleep(1)
    monitor.signal(cv)
    monitor.unlock(m)
else:
    time.sleep(2)
    monitor.lock(m)
    monitor.signal(cv)
    monitor.unlock(m)

monitor.finalize()