def loop(self):
    """Background serial-polling loop.

    While ``self._running`` is set, read one line from the serial port
    under the uwsgi lock, parse it with ``parse_ser`` and feed the
    result to ``self.update_con``.
    """
    # Hoisted out of the loop body: importing on every iteration is
    # wasteful (the original re-imported time/uwsgi each pass).
    import time
    import uwsgi
    while self._running:
        time.sleep(0.01)
        line = None
        try:
            uwsgi.lock()
            # Read bytes from serial; the last 2 chars are '\r\n'.
            line = self.ser.readline().decode('utf-8')[:-2]
        except Exception:
            # Best-effort read: a failed read is simply skipped.
            pass
        finally:
            uwsgi.unlock()
        if line is None:
            # Nothing was read this pass. The original fell through and
            # parsed the previous iteration's `line` (or raised
            # NameError on the very first failure); skip instead.
            continue
        try:
            v = parse_ser(line)
        except ValueError:
            # Malformed line -- ignore and keep polling.
            pass
        except Exception:
            pass
        else:
            self.update_con(v)
def serve_rede_json_cnpj(cpfcnpj, camada=1):
    """Serve the relationship network for a CPF/CNPJ (or id list) as JSON.

    Dispatches on the identifier prefix (PJ_/PF_, EN_/EM_/TE_, ID_) to the
    appropriate rede_relacionamentos call; `camada` (layer depth) is capped
    at the configured maximum. A POSTed JSON body, if present, is treated
    as a list of ids and overrides `cpfcnpj`.
    """
    # if request.remote_addr in ('xxx'):
    #     return jsonify({'acesso':'problema no acesso. favor não utilizar como api de forma intensiva, pois isso pode causar bloqueio para outros ips.'})
    camada = min(gp['camadaMaxima'], int(camada))
    # cpfcnpj = cpfcnpj.upper().strip()
    # NOTE: upper() causes inconsistency with emails, which are stored
    # lowercase in the base -- hence only strip() below.
    cpfcnpj = cpfcnpj.strip()
    listaIds = request.get_json()
    r = None
    # Serialize access: optional uwsgi cross-process lock plus in-process lock.
    if gUwsgiLock:
        uwsgi.lock()
    try:
        with gLock:
            if listaIds:
                # An explicit id list takes precedence over the path argument.
                cpfcnpj = ''
            if not cpfcnpj:
                noLig = rede_relacionamentos.camadasRede(cpfcnpjIn=None, listaIds=listaIds, camada=camada, grupo='', bjson=True)
            elif cpfcnpj.startswith('PJ_') or cpfcnpj.startswith('PF_'):
                noLig = rede_relacionamentos.camadasRede(cpfcnpjIn=cpfcnpj, camada=camada, grupo='', bjson=True)
            elif cpfcnpj.startswith('EN_') or cpfcnpj.startswith('EM_') or cpfcnpj.startswith('TE_'):
                noLig = rede_relacionamentos.camadaLink(cpfcnpjIn=cpfcnpj, listaIds=listaIds, camada=camada, tipoLink='endereco')
            elif cpfcnpj.startswith('ID_'):
                # TODO(review): check whether the upper() here is necessary.
                noLig = rede_relacionamentos.camadaLink(cpfcnpjIn=cpfcnpj.upper(), listaIds=listaIds, camada=camada, tipoLink='base_local')
            else:
                noLig = rede_relacionamentos.camadasRede(cpfcnpjIn=cpfcnpj, camada=camada)
            r = jsonify(noLig)
    finally:
        if gUwsgiLock:
            uwsgi.unlock()
    return r
def post(self):
    """ Add posted files and json manifest

    Add all posted files along with a json manifest file which includes
    the multihash for each file as well as any form fields.

    Returns the submission manifest and its multihash
    """
    manifest = {"fields": {key: value for key, value in request.form.items()}}
    manifest["files"] = [{"name": f.filename,
                          "multihash": "{}".format(g.ipfs.add(f)[1]["Hash"])}
                         for f in request.files.getlist("files[]")]
    logging.debug("Manifest: {}".format(manifest))
    manifest_multihash = g.ipfs.add(cStringIO.StringIO(
        json.dumps(manifest, sort_keys=True)))[1]["Hash"]
    logging.info("Manifest multihash: {}".format(manifest_multihash))

    # Update steward submissions list and publish to ipns
    uwsgi.lock()  # make sure only one process does this at a time
    try:
        steward = get_steward()
        if manifest_multihash not in steward["submissions"]:
            steward["submissions"] = sorted(
                steward["submissions"] + [manifest_multihash])
            update_steward(steward)
            logging.debug("{} added to submissions list".format(manifest_multihash))
        else:
            logging.debug("{} already in submissions list".format(manifest_multihash))
    finally:
        # Fix: the original leaked the uwsgi lock (and blocked every other
        # worker) if get_steward/update_steward raised.
        uwsgi.unlock()
    return jsonify(multihash=manifest_multihash)
def emit(_):
    """Serialize metrics to the memory mapped buffer."""
    if not initialized:
        raise NotInitialized
    # Fresh top-level view; each metric type gets its own namespace.
    view = {
        'version': __version__,
        'counters': {},
        'gauges': {},
        'histograms': {},
        'meters': {},
        'timers': {},
    }
    for (ty, module, name), metric in all_metrics.iteritems():
        view[ty]['%s.%s' % (module, name)] = metric.view()
    marshalled_view = marshal.dumps(view)
    # The mmap'ed buffer is fixed-size: refuse (and warn) rather than
    # write a truncated, unloadable marshal blob.
    if len(marshalled_view) > MAX_MARSHALLED_VIEW_SIZE:
        log.warn('Marshalled length too large, got %d, max %d. '
                 'Try recording fewer metrics or increasing '
                 'MAX_MARSHALLED_VIEW_SIZE' % (len(marshalled_view),
                                               MAX_MARSHALLED_VIEW_SIZE))
        return
    marshalled_metrics_mmap.seek(0)
    try:
        # Reading and writing to/from an mmap'ed buffer is not guaranteed
        # to be atomic, so we must serialize access to it.
        uwsgi.lock()
        marshalled_metrics_mmap.write(marshalled_view)
    finally:
        uwsgi.unlock()
def emit(_):
    """Serialize metrics to the memory mapped buffer."""
    if not initialized:
        raise NotInitialized
    # One sub-dict per metric type.
    view = {
        'version': __version__,
        'counters': {},
        'gauges': {},
        'histograms': {},
        'meters': {},
        'timers': {},
    }
    for (ty, module, name), metric in all_metrics.iteritems():
        view[ty]['%s.%s' % (module, name)] = metric.view()
    marshalled_view = marshal.dumps(view)
    # Guard against overflowing the fixed-size mmap region: warn and skip
    # the write instead of truncating the marshal payload.
    if len(marshalled_view) > MAX_MARSHALLED_VIEW_SIZE:
        log.warn(
            'Marshalled length too large, got %d, max %d. '
            'Try recording fewer metrics or increasing '
            'MAX_MARSHALLED_VIEW_SIZE' % (len(marshalled_view),
                                          MAX_MARSHALLED_VIEW_SIZE))
        return
    marshalled_metrics_mmap.seek(0)
    try:
        # Reading and writing to/from an mmap'ed buffer is not guaranteed
        # to be atomic, so we must serialize access to it.
        uwsgi.lock()
        marshalled_metrics_mmap.write(marshalled_view)
    finally:
        uwsgi.unlock()
def block_if_paused():
    """Block until the bot is resumed when the global uwsgi lock is held.

    Acquiring and immediately releasing the lock blocks this worker for
    as long as whoever paused the bot keeps holding it.
    """
    paused = uwsgi.is_locked()
    if paused:
        print('Bot is paused, waiting for resume...')
    # This pair blocks until the current holder releases the lock.
    uwsgi.lock()
    uwsgi.unlock()
    if paused:
        print('Resuming...')
def __call__(self, *args, **kwargs):
    """Invoke the wrapped callable while holding the global uwsgi lock.

    Returns None without running anything when executed in the spooler.
    """
    # The spooler process must never execute the locked section.
    if not uwsgi.i_am_the_spooler():
        uwsgi.lock()
        try:
            return self.f(*args, **kwargs)
        finally:
            uwsgi.unlock()
def decorated(*args, **kwargs):
    """Run the wrapped function under lock ``self.lock_id``.

    Returns None without running anything when executed in the spooler.
    """
    # Never execute inside the spooler process.
    if not i_am_the_spooler(self.lock_id):
        lock(self.lock_id)
        try:
            return function(*args, **kwargs)
        finally:
            unlock(self.lock_id)
def application(e, s):
    """WSGI app demonstrating the global uwsgi lock (Python 2).

    Holds the lock for 3 seconds so concurrent workers can be observed
    blocking on it, then responds with a small HTML body.
    """
    print "locking"
    uwsgi.lock()
    print "locked"
    # Hold the lock long enough for other workers to visibly block.
    time.sleep(3)
    uwsgi.unlock()
    print "UN-locked"
    s('200 OK', [('Content-Type', 'text/html')])
    return "slow"
def application(e, s):
    """WSGI demo app (Python 2): serialize requests via the uwsgi lock.

    Sleeps 3 seconds while holding the lock, so parallel requests queue up.
    """
    print "locking"
    uwsgi.lock()
    print "locked"
    # Keep the lock held to demonstrate cross-worker serialization.
    time.sleep(3)
    uwsgi.unlock()
    print "UN-locked"
    s('200 OK', [('Content-Type','text/html')])
    return "slow"
def global_crypto_check(signum):
    """uwsgi signal handler: run the 'btc_fee' management command.

    The global uwsgi lock keeps multiple workers from running the
    command concurrently. Errors are logged, never propagated.

    :param signum: uwsgi signal number (unused by the command).
    """
    uwsgi.lock()
    try:
        call_command('btc_fee')
    except Exception:
        # Narrowed from a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt; behavior is still best-effort.
        traceback.print_exc()
    finally:
        # Fix: release the lock on every path (the original skipped the
        # unlock if printing the traceback itself raised).
        uwsgi.unlock()
def serve_dados_detalhes(cpfcnpj):
    """Return the detail data for a CPF/CNPJ as a JSON response.

    Access is serialized by the optional uwsgi cross-process lock and the
    in-process gLock, mirroring the other rede endpoints.
    """
    if gUwsgiLock:
        uwsgi.lock()
    try:
        with gLock:
            dados = rede_relacionamentos.jsonDados(cpfcnpj)
            return jsonify(dados)
    finally:
        if gUwsgiLock:
            uwsgi.unlock()
def long_processing_trans_in(signum):
    """uwsgi signal handler: merge incoming crypto transactions.

    Runs the 'incoming_crypto_merger' command under the global uwsgi
    lock. Errors are logged and swallowed (best-effort background job).

    :param signum: uwsgi signal number (unused by the command).
    """
    uwsgi.lock()
    try:
        call_command("incoming_crypto_merger", "1489820322")
    except Exception:
        # Narrowed from bare `except:`; still best-effort.
        traceback.print_exc()
    finally:
        # Fix: always release the lock, even if print_exc raised.
        uwsgi.unlock()
def monero_work(signum):
    """uwsgi signal handler: run the 'process_monero' management command.

    Serialized across workers by the global uwsgi lock; failures are
    logged and swallowed.

    :param signum: uwsgi signal number (unused by the command).
    """
    uwsgi.lock()
    try:
        call_command("process_monero")
    except Exception:
        # Narrowed from bare `except:`; still best-effort.
        traceback.print_exc()
    finally:
        # Fix: release the lock on every exit path.
        uwsgi.unlock()
def get_state(self):
    '''Get the current state of Bartendro'''
    # If we're not running inside uwsgi, then we can't keep global state
    if not have_uwsgi:
        return self.state
    uwsgi.lock()
    try:
        # Byte 1 of the uwsgi shared area holds the state value.
        state = uwsgi.sharedarea_readbyte(1)
    finally:
        # Fix: the original leaked the lock if the sharedarea read raised.
        uwsgi.unlock()
    return state
def set_state(self, state):
    """Set the current state of Bartendro"""
    # If we're not running inside uwsgi, then don't try to use the lock
    if not have_uwsgi:
        # NOTE(review): returns None here but True below; callers
        # apparently don't depend on the return value -- kept as-is.
        return
    uwsgi.lock()
    try:
        # Byte 1 of the uwsgi shared area holds the state value.
        uwsgi.sharedarea_writebyte(1, state)
    finally:
        # Fix: the original leaked the lock if the sharedarea write raised.
        uwsgi.unlock()
    return True
def get_state(self):
    '''Get the current state of Bartendro'''
    # If we're not running inside uwsgi, then we can't keep global state
    if not have_uwsgi:
        return self.state
    uwsgi.lock()
    try:
        # Read one byte at offset 1 of sharedarea 0 (the state value).
        state = uwsgi.sharedarea_read8(0, 1)
    finally:
        # Fix: the original leaked the lock if the sharedarea read raised.
        uwsgi.unlock()
    return state
def post(self):
    """ Make a submission

    Add all posted files to ipfs and then adds a json manifest
    consisting of a 'fields' dictionary made up of all the form fields
    in the POST and a 'files' array with the name and multihash of each
    posted file:

    {
        "fields": {"field_name": "field_value", ...}
        "files": [{"name": "file_name",
                   "multihash": "ipfs_multihash of file"}...]
    }

    Passing publish=False will skip adding the submission to the
    stewards index. This is useful if you want to make a large number of
    submissions and need to avoid an ipns publish on every one. Note
    that the submission will essentially be 'dangling' and up to the
    client to keep track of and at some point add via PUT.

    Returns the multihash of the submission
    """
    manifest = {
        "fields": {key: value for key, value in request.form.items()}
    }
    # Sort by filename so identical uploads yield an identical manifest.
    manifest["files"] = sorted(
        [{
            "name": f.filename,
            "multihash": "{}".format(g.ipfs.add(f)["Hash"])
        } for f in request.files.getlist("files[]")],
        key=lambda k: k["name"])
    logging.debug("Manifest: {}".format(manifest))
    manifest_multihash = g.ipfs.add(
        cStringIO.StringIO(json.dumps(manifest, sort_keys=True)))["Hash"]
    logging.info("Manifest multihash: {}".format(manifest_multihash))

    # Update steward submissions list and publish to ipns
    args = submit_parser.parse_args()
    if args["publish"]:
        uwsgi.lock()  # make sure only one process does this at a time
        try:
            steward = get_steward()
            if manifest_multihash not in steward["submissions"]:
                steward["submissions"] = sorted(steward["submissions"] +
                                                [manifest_multihash])
                update_steward(steward)
                logging.debug(
                    "{} added to submissions list".format(manifest_multihash))
        finally:
            # Fix: the original leaked the uwsgi lock if
            # get_steward/update_steward raised.
            uwsgi.unlock()
    else:
        logging.debug(
            "{} NOT added to submissions list".format(manifest_multihash))

    return jsonify(multihash=manifest_multihash)
def bootstrap(): import uwsgi import os uwsgi.lock() min_pid = min([w['pid'] for w in uwsgi.workers()]) if min_pid == os.getpid(): from oncall.ui import build print 'building webassets...' build() uwsgi.unlock()
def view():
    """Get a dictionary representation of current metrics."""
    if not initialized:
        raise NotInitialized
    # Rewind, then read the whole marshalled region under the lock --
    # mmap access is not atomic, so it must be serialized.
    marshalled_metrics_mmap.seek(0)
    try:
        uwsgi.lock()
        raw = marshalled_metrics_mmap.read(MAX_MARSHALLED_VIEW_SIZE)
    finally:
        uwsgi.unlock()
    return marshal.loads(raw)
def sighandler(signum):
    """uwsgi signal handler: rate-limited dispatcher for `func`.

    Records the last invocation time per signal in the uwsgi cache and
    skips the call when fired again within `delay` seconds (closure
    variables: `func`, `args`, `delay`).
    """
    now = int(time.time())
    key = 'scheduler_call_time_signal_' + str(signum)
    # The lock makes the read-check-update on the cache atomic across workers.
    uwsgi.lock()
    try:
        updating = uwsgi.cache_get(key)
        if updating is not None:
            # The cache stores the timestamp as 4 big-endian bytes.
            updating = int.from_bytes(updating, 'big')
            if now - updating < delay:
                # Fired too soon after the previous run -- skip entirely.
                return
        uwsgi.cache_update(key, now.to_bytes(4, 'big'))
    finally:
        uwsgi.unlock()
    # Run the real work outside the lock.
    func(*args)
def locked(*args, **kwargs):
    """Run `f` under the uwsgi lock, re-entrantly per instance.

    `args[0]` is the instance; its `__is_locked` flag lets nested calls
    on the same instance proceed without re-acquiring the lock.
    """
    # ensure the spooler will not call it
    if uwsgi.i_am_the_spooler():
        return
    # Re-entrant path: this instance already holds the lock, just call.
    if args[0].__is_locked:
        return f(*args, **kwargs)
    uwsgi.lock()
    args[0].__is_locked = True
    try:
        return f(*args, **kwargs)
    finally:
        # Clear the flag before releasing so no window exists where the
        # lock is free but the flag still claims it is held.
        args[0].__is_locked = False
        uwsgi.unlock()
def unlock_bartendro(self):
    """Call this function when you've previously locked bartendro and
    now you want to unlock it.

    Returns True on success, False if bartendro wasn't locked.
    """
    # If we're not running inside uwsgi, then don't try to use the lock
    if not have_uwsgi:
        return True
    uwsgi.lock()
    try:
        # Byte 0 of sharedarea 0 is the "bartendro is locked" flag.
        is_locked = uwsgi.sharedarea_read8(0, 0)
        if not is_locked:
            return False
        uwsgi.sharedarea_write8(0, 0, 0)
        return True
    finally:
        # Fix: single release point; the original duplicated the unlock
        # per branch and leaked the lock if a sharedarea call raised.
        uwsgi.unlock()
def serve_rede_json_links(cpfcnpj='', camada=1, numeroItens=15, valorMinimo=0, valorMaximo=0):
    """Serve the link-layer network for a CPF/CNPJ (or POSTed id list) as JSON.

    `camada` is capped at the configured maximum; item count and value
    range bound the returned links. A POSTed JSON id list overrides the
    `cpfcnpj` path argument.
    """
    r = None
    # Serialize access: optional uwsgi cross-process lock plus in-process lock.
    if gUwsgiLock:
        uwsgi.lock()
    try:
        with gLock:
            camada = min(gp['camadaMaxima'], int(camada))
            listaIds = request.get_json()
            if listaIds:
                # An explicit id list takes precedence over the path argument.
                cpfcnpj = ''
            r = jsonify(rede_relacionamentos.camadaLink(cpfcnpjIn=cpfcnpj, listaIds=listaIds, camada=camada, numeroItens=numeroItens, valorMinimo=valorMinimo, valorMaximo=valorMaximo, tipoLink='link'))
    finally:
        if gUwsgiLock:
            uwsgi.unlock()
    return r
def unlock_bartendro(self):
    """Call this function when you've previously locked bartendro and
    now you want to unlock it.

    Returns True on success, False if bartendro wasn't locked.
    """
    # If we're not running inside uwsgi, then don't try to use the lock
    if not have_uwsgi:
        return True
    uwsgi.lock()
    try:
        # Byte 0 of the shared area is the "bartendro is locked" flag.
        is_locked = uwsgi.sharedarea_readbyte(0)
        if not is_locked:
            return False
        uwsgi.sharedarea_writebyte(0, 0)
        return True
    finally:
        # Fix: single release point; the original duplicated the unlock
        # per branch and leaked the lock if a sharedarea call raised.
        uwsgi.unlock()
def post(self, address):
    """ Add address to this steward's peer list

    Refuses to add this steward's own ipfs address; duplicates are
    ignored. Returns the (possibly updated) peer list.
    """
    uwsgi.lock()  # make sure only one process does this at a time
    try:
        steward = get_steward()
        if address == g.ipfs.id()["ID"]:
            logging.warning("Attempt to add this steward's address to peer list")
        elif address not in steward["peers"]:
            # Sort so adding in different order yields the same list
            steward["peers"] = sorted(steward["peers"] + [address])
            update_steward(steward)
            logging.info("Added {} to peer list".format(address))
    finally:
        # Fix: the original leaked the lock if get_steward/update_steward
        # or the ipfs id lookup raised.
        uwsgi.unlock()
    return steward["peers"]
def post(self):
    """ Make a submission

    Add all posted files to ipfs and then adds a json manifest
    consisting of a 'fields' dictionary made up of all the form fields
    in the POST and a 'files' array with the name and multihash of each
    posted file:

    {
        "fields": {"field_name": "field_value", ...}
        "files": [{"name": "file_name",
                   "multihash": "ipfs_multihash of file"}...]
    }

    Passing publish=False will skip adding the submission to the
    stewards index. This is useful if you want to make a large number of
    submissions and need to avoid an ipns publish on every one. Note
    that the submission will essentially be 'dangling' and up to the
    client to keep track of and at some point add via PUT.

    Returns the multihash of the submission
    """
    manifest = {"fields": {key: value for key, value in request.form.items()}}
    # Sort by filename so identical uploads yield an identical manifest.
    manifest["files"] = sorted([{"name": f.filename,
                                 "multihash": "{}".format(g.ipfs.add(f)["Hash"])}
                                for f in request.files.getlist("files[]")],
                               key=lambda k: k["name"])
    logging.debug("Manifest: {}".format(manifest))
    manifest_multihash = g.ipfs.add(cStringIO.StringIO(
        json.dumps(manifest, sort_keys=True)))["Hash"]
    logging.info("Manifest multihash: {}".format(manifest_multihash))

    # Update steward submissions list and publish to ipns
    args = submit_parser.parse_args()
    if args["publish"]:
        uwsgi.lock()  # make sure only one process does this at a time
        try:
            steward = get_steward()
            if manifest_multihash not in steward["submissions"]:
                steward["submissions"] = sorted(
                    steward["submissions"] + [manifest_multihash])
                update_steward(steward)
                logging.debug("{} added to submissions list".format(manifest_multihash))
        finally:
            # Fix: the original leaked the uwsgi lock if
            # get_steward/update_steward raised.
            uwsgi.unlock()
    else:
        logging.debug("{} NOT added to submissions list".format(manifest_multihash))
    return jsonify(multihash=manifest_multihash)
def post(self, address):
    """ Add address to this steward's peer list

    Refuses to add this steward's own ipfs address; duplicates are
    ignored. Returns the (possibly updated) peer list.
    """
    uwsgi.lock()  # make sure only one process does this at a time
    try:
        steward = get_steward()
        if address == g.ipfs.id()["ID"]:
            logging.warning(
                "Attempt to add this steward's address to peer list")
        elif address not in steward["peers"]:
            # Sort so adding in different order yields the same list
            steward["peers"] = sorted(steward["peers"] + [address])
            update_steward(steward)
            logging.info("Added {} to peer list".format(address))
    finally:
        # Fix: the original leaked the lock if get_steward/update_steward
        # or the ipfs id lookup raised.
        uwsgi.unlock()
    return steward["peers"]
def lock_bartendro(self):
    """Call this function before making a drink or doing anything that
    where two users' action may conflict. This function will return
    True if the lock was granted, of False is someone else has already
    locked Bartendro."""
    # If we're not running inside uwsgi, then don't try to use the lock
    if not have_uwsgi:
        return True
    uwsgi.lock()
    try:
        # Byte 0 of the shared area is the "bartendro is locked" flag.
        is_locked = uwsgi.sharedarea_readbyte(0)
        if is_locked:
            return False
        uwsgi.sharedarea_writebyte(0, 1)
        return True
    finally:
        # Fix: single release point; the original duplicated the unlock
        # per branch and leaked the lock if a sharedarea call raised.
        uwsgi.unlock()
def lock_bartendro(self):
    """Call this function before making a drink or doing anything that
    where two users' action may conflict. This function will return
    True if the lock was granted, of False is someone else has already
    locked Bartendro."""
    # If we're not running inside uwsgi, then don't try to use the lock
    if not have_uwsgi:
        return True
    uwsgi.lock()
    try:
        # Byte 0 of sharedarea 0 is the "bartendro is locked" flag.
        is_locked = uwsgi.sharedarea_read8(0, 0)
        if is_locked:
            return False
        uwsgi.sharedarea_write8(0, 0, 1)
        return True
    finally:
        # Fix: single release point; the original duplicated the unlock
        # per branch and leaked the lock if a sharedarea call raised.
        uwsgi.unlock()
def put(self):
    """ Add a list of existing submissions

    Add the multihash of an existing submission to this server's index.
    Used with publish=False in POST to add multiple submissions to the
    index at once and therefore avoid the ipns publish on each.
    """
    uwsgi.lock()  # make sure only one process does this at a time
    try:
        steward = get_steward()
        for s in request.json["submissions"]:
            if s not in steward["submissions"]:
                logging.debug("{} added to submissions list".format(s))
                steward["submissions"] = sorted(steward["submissions"] + [s])
            else:
                logging.debug("{} already in submissions list".format(s))
        update_steward(steward)
    finally:
        # Fix: the original leaked the lock if get_steward/update_steward
        # raised, blocking every other worker.
        uwsgi.unlock()
    logging.debug("{} bulk published".format(request.json["submissions"]))
    return jsonify(request.json)
def get_url(url): """Download a file from url to cache_dir.""" # set a lock to prevent multiple simultaneous downloads of the same file mypid = os.getpid() uwsgi.lock() otherpid = uwsgi.cache_get(url) if otherpid: uwsgi.unlock() while otherpid: log('D: [%d] waiting for pid %s to download %s' % (mypid, otherpid, url)) time.sleep(1) otherpid = uwsgi.cache_get(url) return 200 else: uwsgi.cache_set(url, str(mypid)) uwsgi.unlock() dest = localfile(url) log('D: [%d] downloading %s to %s' % (mypid, url, dest)) curl = pycurl.Curl() curl.setopt(curl.URL, url) curl.setopt(curl.FOLLOWLOCATION, True) path = '/'.join(dest.split('/')[:-1]) if not os.path.exists(path): # parallel download of rpms in subdir will create it right now try: os.makedirs(path) except OSError as e: # this catches duplicate creation (so just W not E) # TODO: need to bypass the open() on real errors # like permissions log('W: [%d] OS error(%d): %s' % (mypid, e.errno, e.strerror)) with open(dest, 'wb') as fil: curl.setopt(curl.WRITEFUNCTION, fil.write) curl.perform() uwsgi.cache_del(url) return curl.getinfo(curl.HTTP_CODE)
def serve_rede_json_cnpj(cpfcnpj, camada=1):
    """Serve the relationship network for a CPF/CNPJ (or id list) as JSON.

    Dispatches on the identifier prefix (PJ_/PF_, EN_/EM_/TE_) to the
    appropriate rede_relacionamentos call; `camada` (layer depth) is capped
    at the configured maximum. A POSTed JSON id list overrides `cpfcnpj`.
    """
    camada = min(camadaMaxima, int(camada))
    listaIds = request.get_json()
    r = None
    # Serialize access: optional uwsgi cross-process lock plus in-process lock.
    if gUwsgiLock:
        uwsgi.lock()
    try:
        with gLock:
            if listaIds:
                # An explicit id list takes precedence over the path argument.
                cpfcnpj = ''
            if not cpfcnpj:
                r = jsonify(rede_relacionamentos.camadasRede(cpfcnpjIn=cpfcnpj, listaIds=listaIds, camada=camada, grupo='', bjson=True))
            elif cpfcnpj.startswith('PJ_') or cpfcnpj.startswith('PF_'):
                r = jsonify(rede_relacionamentos.camadasRede(cpfcnpjIn=cpfcnpj, camada=camada, grupo='', bjson=True))
            elif cpfcnpj.startswith('EN_') or cpfcnpj.startswith('EM_') or cpfcnpj.startswith('TE_'):
                r = jsonify(rede_relacionamentos.camadaLink(cpfcnpjIn=cpfcnpj, listaIds=listaIds, camada=camada, tipoLink='endereco'))
            else:
                r = jsonify(rede_relacionamentos.camadasRede(cpfcnpj, camada=camada))
    finally:
        if gUwsgiLock:
            uwsgi.unlock()
    return r
def get_url(url): """Download a file from url to cache_dir.""" # set a lock to prevent multiple simultaneous downloads of the same # file mypid = os.getpid() uwsgi.lock() otherpid = uwsgi.cache_get(url) if otherpid: uwsgi.unlock() while otherpid: log('D: pid %d waiting for pid %s to download %s' % (mypid, otherpid, url)) time.sleep(1) otherpid = uwsgi.cache_get(url) return 200 else: uwsgi.cache_set(url, str(mypid)) uwsgi.unlock() dest = localfile(url) log('D: pid %d downloading %s' % (mypid, url)) curl = pycurl.Curl() curl.setopt(curl.URL, url) path = '/'.join(dest.split('/')[:-1]) if not os.path.exists(path): # parallel download of rpms in subdir will create it right now try: os.makedirs(path) except OSError as e: # this catches duplicate creation (so just W not E) # TODO: need to bypass the open() on real errors # like permissions log('W: OS error(%d): %s' % (e.errno, e.strerror)) with open(dest, 'wb') as fil: curl.setopt(curl.WRITEFUNCTION, fil.write) curl.perform() uwsgi.cache_del(url) return curl.getinfo(curl.HTTP_CODE)
def out_trans(signum):
    """uwsgi signal handler: run all outgoing-transaction commands.

    Each command is attempted independently (one failing must not stop
    the others); failures are logged and swallowed. The global uwsgi
    lock prevents concurrent runs across workers.

    :param signum: uwsgi signal number (unused by the commands).
    """
    uwsgi.lock()
    try:
        # Best-effort sequence: every command runs even if earlier ones fail.
        for cmd_args in (('out_crypto_merger',),
                         ('out_monero',),
                         ('out_crypto_eth',),
                         ('out_crypto_block', "BTC", "0")):
            try:
                call_command(*cmd_args)
            except Exception:
                # Narrowed from bare `except:`; still best-effort.
                traceback.print_exc()
    finally:
        # Fix: release the lock on every exit path (the original skipped
        # the unlock if printing a traceback raised).
        uwsgi.unlock()
def __exit__(self, *args):
    """Context-manager exit: release the uwsgi lock at index `lock_idx`.

    Runs unconditionally, so the lock is released even when the managed
    block raised (exception info arrives in *args and is ignored).
    """
    uwsgi.unlock(self.lock_idx)
def __exit__(self, exc_type, exc_val, exc_tb):
    """Context-manager exit: release the global uwsgi lock.

    Exception details are ignored (returns None), so any exception from
    the managed block still propagates after the lock is released.
    """
    uwsgi.unlock()
def unlock(self):
    """Release the global uwsgi lock."""
    uwsgi.unlock()
def handle_post(env):
    '''
    process the form submission and return data structures

    note what dict(parse_qsl(formdata)) does:

    >>> from urllib.parse import parse_qsl
    >>> parse_qsl('a=b&b=c&a=d&a=e')
    [('a', 'b'), ('b', 'c'), ('a', 'd'), ('a', 'e')]
    >>> OrderedDict(_)
    {'a': 'e', 'b': 'c'}
    >>>

    so only use it where you know that no key will have more than
    one value. parse_qs will instead return a dict of lists.
    '''
    uwsgi.lock()  # lock access to DATA global
    # Record which worker/core is handling this request (None outside uwsgi).
    worker = getattr(uwsgi, 'worker_id', lambda *args: None)()
    DATA['handler'] = (worker, env.get('uwsgi.core'))
    timestamp = datetime.datetime.utcnow().isoformat()
    try:
        if env.get('REQUEST_METHOD') != 'POST':
            # GET and friends: just return a snapshot of the state.
            return copy.deepcopy(DATA)
        posted = urllib.parse.parse_qsl(env['wsgi.input'].read().decode())
        # Safe to collapse into a dict: each form key appears once here.
        DATA['postdict'] = postdict = dict(posted)
        logging.debug('handle_post: %s, postdict: %s', posted, postdict)
        # [groupname, total, turn] and submit=Submit if group creation
        # [username, group] and submit=Join if joining a group
        postdict['timestamp'] = timestamp
        if not postdict.get('httpsession_key'):
            postdict['httpsession_key'] = uuid.uuid4().hex
            logging.debug('set httpsession_key = %s',
                          postdict['httpsession_key'])
        try:
            buttonvalue = postdict['submit']
        except KeyError:
            raise ValueError('No "submit" button found')
        update_httpsession(postdict)
        if buttonvalue == 'Join':
            # username being added to group
            # don't allow if name already in group
            groups = DATA['groups']
            logging.debug('processing Join: %s', postdict)
            username = postdict.get('username', '')
            group = postdict.get('group', '')
            if not username:
                raise ValueError('Name field cannot be empty')
            elif group in groups:
                postdict['groupname'] = group
                if username in groups[group]['participants']:
                    raise ValueError('"%s" is already a member of %s' %
                                     (username, group))
                groups[group]['participants'][username] = {
                    'timestamp': timestamp
                }
                # First joiner starts the talk session clock.
                if 'talksession' not in groups[group]:
                    groups[group]['talksession'] = {'start': timestamp}
                postdict['joined'] = True
            # else group not in groups, no problem, return to add group form
            return copy.deepcopy(DATA)
        elif buttonvalue == 'Submit':
            # groupname, total (time), turn (time) being added to groups
            # don't allow if groupname already being used
            groups = DATA['groups']
            group = postdict['groupname']
            if not group in groups:
                groups[group] = postdict
                groups[group]['participants'] = {}
                return copy.deepcopy(DATA)
            else:
                raise ValueError(
                    ('Group {group[groupname]} already exists with total time '
                     '{group[total]} minutes and turn time '
                     '{group[turn]} seconds').format(group=groups[group]))
        elif buttonvalue == 'OK':
            # affirming receipt of error message or Help screen
            return copy.deepcopy(DATA)
        elif buttonvalue == 'Help':
            raise UserWarning('Help requested')
        elif buttonvalue == 'My Turn':
            # attempting to speak in ongoing session
            # this could only be reached by browser in which JavaScript did
            # not work properly in taking over default actions
            logging.debug('env: %s', env)
            raise NotImplementedError(
                'Browser \'%s\' incompatible with script' %
                env.get('HTTP_USER_AGENT', '(unknown)'))
        else:
            raise ValueError('Unknown form submitted')
    except UserWarning as request:
        # Help is signalled via UserWarning, not an error path.
        if str(request) == 'Help requested':
            logging.debug('displaying help screen')
            DATA['postdict']['text'] = read(os.path.join(THISDIR, 'README.md'))
            return copy.deepcopy(DATA)
    except EXPECTED_ERRORS as failed:
        # Expected validation failures become a message shown to the user.
        logging.debug('displaying error: "%r"', failed)
        DATA['postdict']['text'] = repr(failed)
        return copy.deepcopy(DATA)
    finally:
        uwsgi.unlock()
def resume():
    """Resume the bot by releasing the global uwsgi 'pause' lock."""
    uwsgi.unlock()
def __unlock(self, name_or_id):
    """Release a uwsgi lock given either its numeric id or its name.

    uwsgi.unlock accepts only an integer id; a TypeError means we were
    given a name, which is mapped to its index in self._locks.
    """
    try:
        uwsgi.unlock(name_or_id)
    except TypeError:
        uwsgi.unlock(self._locks.index(name_or_id))