def order():
    if not order_param:
        order_conf = py_.find(ordering, {'default': True})
    else:
        order_conf = py_.find(ordering, {'value': order_param})
    if not order_conf:
        return ''
    return f"ORDER BY {order_conf.get('column')} {order_conf.get('type')}"

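# Note on the pydash semantics used throughout these snippets: `find` accepts
# either a dict (shallow "matches" shorthand) or a callable predicate, and
# returns the first matching element or None. A minimal, self-contained sketch
# with made-up data (the `ordering_demo` rows are illustrative assumptions):
import pydash as py_

ordering_demo = [
    {'value': 'name', 'column': 'name', 'type': 'ASC', 'default': False},
    {'value': 'date', 'column': 'created_at', 'type': 'DESC', 'default': True},
]
assert py_.find(ordering_demo, {'default': True})['column'] == 'created_at'
assert py_.find(ordering_demo, lambda o: o['value'] == 'name')['type'] == 'ASC'
assert py_.find(ordering_demo, {'value': 'missing'}) is None
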
def handle_stream_binance_price(json):
    users = []
    query = sql.Person.select().where(
        (sql.Person.PASSPORT_ID == json['passport']['id'])
        & (sql.Person.PASSPORT_SECRET == json['passport']['secret_key'])
    ).dicts().execute()
    for user in query:
        users.append(user)
    if users:
        tickers = get_all_tickers()
        # the four emits differ only by symbol, so loop over them
        for symbol in ('BTCRUB', 'USDTRUB', 'ETHRUB', 'LTCRUB'):
            emit('stream_binance_price', {
                'symbol': symbol,
                'price': float(pydash.find(tickers, {'symbol': symbol}).get('price', -1)),
            })

def get_order_by_id(self, exchange: str, symbol: str, id: str, use_exchange_id=False) -> Order:
    key = '{}-{}'.format(exchange, symbol)
    if use_exchange_id:
        return pydash.find(self.storage[key], lambda o: o.exchange_id == id)
    return pydash.find(self.storage[key], lambda o: o.id == id)

def _process(self, accum, row):
    elem = find(accum, {'employee_name': row['employee_name']})
    if elem is None:
        elem = {
            'employee_name': row['employee_name'],
            'results': [{
                'ddate': row['ddate'],
                'qty': 0,
            }],
            'qty': 0,
        }
        accum.append(elem)
    elem['qty'] = elem['qty'] + row['qty']
    day_result = find(elem['results'], {'ddate': row['ddate']})
    if day_result is None:
        day_result = {
            'ddate': row['ddate'],
            'qty': 0,
        }
        elem['results'].append(day_result)
    day_result['qty'] = day_result['qty'] + row['qty']
    return accum

def get_order_by_id(self, exchange: str, symbol: str, id: str,
                    use_exchange_id: bool = False) -> Order:
    key = f'{exchange}-{symbol}'
    if use_exchange_id:
        return pydash.find(self.storage[key], lambda o: o.exchange_id == id)
    return pydash.find(self.storage[key], lambda o: o.id == id)

def ask_columns(
    conn_id: int,
    columns: List[ms.ColumnIn],
    db: Session = Depends(get_db),
):
    connection = db.query(models.Connection).get(conn_id)
    expressions = redact.get_expressions_in_columns(connection, columns)
    red_columns = redact.get_columns_in_columns(connection, columns)
    answers: List[s.ColumnAnswerOut] = []
    for c in columns:
        answer: s.ColumnAnswerOut = s.ColumnAnswerOut(
            owner=c.owner, table_name=c.table_name, column_name=c.column_name)
        answer.column = pydash.find(
            red_columns,
            lambda x: x.object_owner == c.owner
            and x.object_name == c.table_name
            and x.column_name == c.column_name,
        )
        answer.expression = pydash.find(
            expressions,
            lambda x: x.object_owner == c.owner
            and x.object_name == c.table_name
            and x.column_name == c.column_name,
        )
        answers.append(answer)
    return answers

async def return_resource(self, worker_id, resource_id, slot_id=None):
    resource = await self.get_resource(resource_id)
    if slot_id:
        target_slots = pydash.find(resource['resource_slots'], {'id': slot_id})
    else:
        target_slots = pydash.find(resource['resource_slots'], {
            'user_id': worker_id,
            'status': 'occupied'
        })
    if target_slots:
        request = self._httpclient.request()
        url = self._httpclient.config_url(
            '/api/resources/{}/slots/{}'.format(resource_id, target_slots['id']))
        req_json_body = json.dumps({'status': 'gone'})
        try:
            async with request.put(url, data=req_json_body) as r:
                # TODO: handle error responses
                json_data = await r.json()
                print(r)
                return json_data
        except Exception as exc:
            print(exc)
            return None
    else:
        # raise Exception('can return only occupied resource')
        print('can return only occupied resource')

async def on_perform(self, context, args):
    target_resource_id = pydash.find(args, {'key': 'resource'})['value']
    quit_point = pydash.find(args, {'key': 'quit'})['value']
    MAX_RETRY = 100
    REQUEST_INTERVAL = 100
    worker_id = context.blackboard.get_worker()
    actuator = Actuator()
    await actuator.moving(context, quit_point)
    waiter = Waiter(REQUEST_INTERVAL)
    for i in range(MAX_RETRY):
        print('return occupied resource {}/{}'.format(i + 1, MAX_RETRY))
        target_resource = await context.api_configuration.get_resource(target_resource_id)
        target_slots = target_resource['resource_slots']
        occupied = pydash.find(target_slots, {'user_id': worker_id, 'status': 'occupied'})
        try:
            await context.api_configuration.return_resource(
                worker_id, target_resource_id, occupied['id'])
            return True
        except Exception as err:
            print(err)
            print('failed to return resource with error')
        await waiter.wait()
    print('exceeded maximum tries to return occupied resource')
    return False

def get_object_from_id(oid, pid):
    hit_oid = py_.find(objects['thing-descriptions'], {'oid': oid})
    if hit_oid:
        hit_values = py_.find(hit_oid['values'], {'pid': pid})
        return jsonify(py_.omit(hit_values, 'pid'))
    else:
        # original built an unused set literal here; pass the message to abort instead
        abort(404, 'Device or property not found')

def main():
    global runner
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    load_dotenv(dotenv_path='.env')
    flag_argnames = [
        'load_model', 'use_adaptive_softmax', 'use_fast_sampler',
        'dont_use_hardcoded_cutoffs', 'use_ranking_loss', 'dont_use_deep_network',
        'use_cnn_local', 'use_lstm_local', 'dont_freeze_word_embeddings',
        'dont_continue_training', 'cheat', 'use_conll', 'use_wiki2vec'
    ]
    args = getopt.getopt(
        _.tail(sys.argv), '',
        flag_argnames + [arg['name'] + '=' for arg in args_with_values])[0]
    flags = [_.head(arg) for arg in args]
    train_params = m(use_fast_sampler='--use_fast_sampler' in flags)
    run_params = m(load_model='--load_model' in flags,
                   cheat='--cheat' in flags,
                   continue_training='--dont_continue_training' not in flags,
                   use_conll='--use_conll' in flags)
    model_params = m(use_adaptive_softmax='--use_adaptive_softmax' in flags,
                     use_hardcoded_cutoffs='--dont_use_hardcoded_cutoffs' not in flags,
                     use_ranking_loss='--use_ranking_loss' in flags,
                     use_cnn_local='--use_cnn_local' in flags,
                     use_lstm_local='--use_lstm_local' in flags,
                     use_deep_network='--dont_use_deep_network' not in flags,
                     freeze_word_embeddings='--dont_freeze_word_embeddings' not in flags,
                     use_wiki2vec='--use_wiki2vec' in flags)
    paths = m(lookups=os.getenv("LOOKUPS_PATH"),
              page_id_order=os.getenv("PAGE_ID_ORDER_PATH"))
    for arg in args_with_values:
        name = arg['name']
        pair = _.find(args, lambda pair: name in pair[0])
        if pair:
            parsed = arg['type'](pair[1])
            if arg['for'] == 'path':
                paths = paths.set(name, parsed)
            elif arg['for'] == 'model_param':
                model_params = model_params.set(name, parsed)
            elif arg['for'] == 'train_param':
                train_params = train_params.set(name, parsed)
            elif arg['for'] == 'run_param':
                run_params = run_params.set(name, parsed)
            else:
                raise ValueError(
                    '`args_with_values` contains unsupported param group ' + arg['for'])
    name_pair = _.find(args, lambda pair: 'name' in pair[0])
    name = name_pair[1] if name_pair else ''
    runner = Runner(device=device,
                    paths=paths,
                    train_params=train_params,
                    model_params=model_params,
                    run_params=run_params)
    runner.run()

def ManageRecordSignal(self, id, item):
    hit_1 = pydash.find(self._devices_list, {"hardwareID": id})
    if hit_1 is not None:
        hit_2 = pydash.find(hit_1["streams"], {"id": item["id"]})
        if hit_2 is not None:
            if hit_2["subscribed"] == True:  # Change to True after testing
                value = self.SerializeValue(item["format"], item["value"])
                self.NewRecordSignal(value, id, {"id": item["id"]})

def update_object_property(oid, pid):
    body = request.json
    hit_oid = py_.find(objects['thing-descriptions'], {'oid': oid})
    if hit_oid:
        hit_values = py_.find(hit_oid['values'], {'pid': pid})
        hit_values['value'] = body['value']
        hit_values['timestamp'] = datetime.datetime.now().isoformat()
        return jsonify(py_.omit(hit_values, 'pid'))
    else:
        # original built an unused dict here; pass the message to abort instead
        abort(404, 'please move along: nothing to see here')

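# For reference, the two Flask handlers above assume an in-memory store shaped
# roughly like this - an illustrative sketch inferred from the lookups, not the
# project's actual data:
objects = {
    'thing-descriptions': [
        {
            'oid': 'temp-sensor-1',        # matched by py_.find(..., {'oid': oid})
            'values': [
                {'pid': 'temperature',     # matched by py_.find(..., {'pid': pid})
                 'value': 21.5,
                 'timestamp': '2020-01-01T00:00:00'},
            ],
        },
    ],
}
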
def Unsubscribe(self, device_id, profile):
    hit_1 = pydash.find(self._devices_list, {"hardwareID": device_id})
    if hit_1 is not None:
        hit_2 = pydash.find(hit_1["streams"], {"id": profile["id"]})
        if hit_2 is not None:
            hit_2["subscribed"] = False
            self._logger.info("Unsubscribed from " + device_id + " - " + profile['id'])
        else:
            ProtocolException(
                "Component not found (subscription to device ID) " + device_id)
    else:
        ProtocolException(
            "Component not found (subscription to device ID) " + device_id)

def search_pre(self, files, task_id, file_dir, curr_file, curr_num):
    # Search for the previous chunk
    pre_num = str(int(curr_num.split('-')[0]) - 1)
    task_id_prefix = f'{task_id}.'
    if self.aios_redis.get(f'plus_uploader:{task_id}:{pre_num}'):
        pre_file = f'{task_id_prefix}{pre_num}'
        if pre_file in files:
            self.aios_print(f'Merging files (case A): [{pre_file}] <= [{curr_file}]')
            new_file = self.zip_region(
                f'{task_id_prefix}{pre_num}-{curr_file.replace(task_id_prefix, "")}'
            )
            self.merge_part_files(file_dir, pre_file, curr_file, new_file)
            curr_file = new_file
            _.remove(files, lambda x: x == pre_file)
    else:
        # previous chunk may already be part of a merged joint file
        pre_file = _.find(files, lambda x: re.match(rf'.*\.\d+-{pre_num}', x))
        if pre_file:
            self.aios_print(f'Merging files (case B): [{pre_file}] <= [{curr_file}]')
            pre_num_joint = pre_file.replace(task_id_prefix, '')
            new_file = self.zip_region(
                f'{task_id_prefix}{pre_num_joint}-{curr_file.replace(task_id_prefix, "")}'
            )
            self.merge_part_files(file_dir, pre_file, curr_file, new_file)
            curr_file = new_file
            _.remove(files, lambda x: x == pre_file)
    return curr_file

def _scrape(self, **kwargs):
    json = get_json(self.DATA_URL)
    state_info = pydash.get(json, 'state_testing_results.values.-1')
    demographics_data = pydash.get(json, 'demographics.race')
    aa_data = pydash.find(
        demographics_data, lambda data: data['description'] == 'Black')
    date = datetime.strptime(state_info['testDate'], '%m/%d/%Y').date()
    cases = state_info.get('confirmed_cases')
    deaths = state_info.get('deaths')
    aa_cases = aa_data.get('count')
    aa_deaths = aa_data.get('deaths')
    assert cases, 'Could not find number of confirmed cases'
    assert deaths, 'Could not find number of deaths'
    assert aa_cases, 'Could not find number of AA cases'
    assert aa_deaths, 'Could not find number of AA deaths'
    pct_aa_cases = to_percentage(aa_cases, cases)
    pct_aa_deaths = to_percentage(aa_deaths, deaths)
    return [
        self._make_series(date=date,
                          cases=cases,
                          deaths=deaths,
                          aa_cases=aa_cases,
                          aa_deaths=aa_deaths,
                          pct_aa_cases=pct_aa_cases,
                          pct_aa_deaths=pct_aa_deaths,
                          pct_includes_unknown_race=True,
                          pct_includes_hispanic_black=False)
    ]

def boost_page_rank(ranks, word_index, docs, doc_list):
    if not should_boost:
        return ranks
    # index doc_list by the word_id for quick lookup
    indexed_doc_list = {doc['word_id']: doc['doc_id_list'] for doc in doc_list}
    # index word_index by word -> word_id
    word_index = {word['word']: word['word_id'] for word in word_index}
    # try to boost rank for each document
    for doc in docs:
        doc_id = doc['doc_id']
        url = doc['doc']
        text = doc['title'].lower()
        # iterate through search words and boost for each applicable factor
        for word in word_index:
            # boost for word occurrence frequency; this also accounts for
            # intersection between words in a url, as a url with overlapping
            # words will be boosted more by virtue of word frequency
            count = _.find(indexed_doc_list[word_index[word]], {'doc_id': doc_id})
            count = count['count'] if count is not None else 0
            factor = boost_factor['word_frequency'] * count
            ranks[doc_id] *= factor
            # boost for title
            if text.find(word) != -1:
                ranks[doc_id] *= boost_factor['title']
                print('can boost title!', text, word, doc_id, boost_factor['title'])
            # boost for url
            if url.find(word) != -1:
                ranks[doc_id] *= boost_factor['url']
                print('can boost url!', url, word, doc_id, boost_factor['url'])
    return ranks

def update_answer(question, answers_params):
    for answer_param in answers_params:
        n = answer_param['n']
        if n <= 0:
            FeatureCtrl.create_answer(question, answer_param)
        else:
            q = Session.query(Answer).filter(Answer.n == n)
            if 'txt' in answer_param:
                q.update({'txt': answer_param['txt']})
    # Delete answers whose text has become empty
    for answer_param in answers_params:
        if 'n' in answer_param and answer_param['n'] > 0 and \
                'txt' in answer_param and answer_param['txt'].strip() == '':
            n = answer_param['n']
            Session.query(Answer).filter(Answer.n == n).delete()
    answers = Session.query(Answer).filter(
        Answer.question_n == question.n).all()
    for answer in answers:
        test_answer = find(answers_params, {'n': answer.n})
        if test_answer is None:
            Session.query(Answer).filter(Answer.n == answer.n).delete()

async def on_perform(self, context, args):
    type = pydash.find(args, {'key': 'type'})['value']
    timeout = pydash.find(args, {'key': 'timeout'})['value']
    if type == 'duration':
        print('start waiting... for {} sec'.format(timeout / 1000))
        waiter = Waiter(context.event_loop, timeout)
        await waiter.wait()
        return True
    elif type == 'signal':
        return await self.type_signal(context, args)
    elif type == 'human_input':
        return await self.type_human_input(context, args)
    else:
        print('unknown wait type')
        return False

def __init__(self) -> None:
    self.storage = {}
    for name in config['app']['considering_exchanges']:
        starting_assets = config['env']['exchanges'][name]['assets']
        fee = config['env']['exchanges'][name]['fee']
        exchange_type = config['env']['exchanges'][name]['type']
        if exchange_type == 'spot':
            self.storage[name] = SpotExchange(name, starting_assets, fee)
        elif exchange_type == 'futures':
            settlement_currency = jh.get_config(
                f'env.exchanges.{name}.settlement_currency')
            # dirty fix to get the settlement_currency right for non-USDT pairs
            settlement_asset_dict = pydash.find(
                starting_assets,
                lambda asset: asset['asset'] == settlement_currency)
            if settlement_asset_dict is None:
                starting_assets[0]['asset'] = settlement_currency
            self.storage[name] = FuturesExchange(
                name, starting_assets, fee,
                settlement_currency=settlement_currency,
                futures_leverage_mode=jh.get_config(
                    f'env.exchanges.{name}.futures_leverage_mode'),
                futures_leverage=jh.get_config(
                    f'env.exchanges.{name}.futures_leverage'),
            )
        else:
            raise InvalidConfig(
                'Value for exchange type in your config file is not valid. '
                'Supported values are "spot" and "futures".')

def post(self, req):
    try:
        body_unicode = req.body.decode('utf-8')
        post_data = json.loads(body_unicode)
        if not validate_captcha(post_data['captchaToken']):
            return HttpResponse(status=403)
        response = get_cv_data(req, {
            'templateId': post_data['templateId'],
            'colorProfileId': post_data['colorProfileId'],
            'default': True,
        })
        if not response.success:
            # original referenced an undefined `slug`; use the requested template id
            raise Exception('cv data not found for template ' + post_data['templateId'])
        context = {'cv': response.data.cv}
        template = py_.find(
            read_config_json(['templates', 'TEMPLATES.json']).get('pdf', None),
            {'slug': post_data['templateId']})
        self.send_confirm_email(req, post_data)
        return self.render_cv_pdf(context, template['path'])
    except BaseException as error:
        print(error)
        return JsonResponse({'success': False, 'errors': [str(error)]})

def prepath_split(prepath):
    '''
    Split prepath into useful names. Works with predir (prename will be None)
    prepath: data/dqn_pong_2018_12_02_082510/dqn_pong_t0_s0
    predir: data/dqn_pong_2018_12_02_082510
    prefolder: dqn_pong_2018_12_02_082510
    prename: dqn_pong_t0_s0
    spec_name: dqn_pong
    experiment_ts: 2018_12_02_082510
    ckpt: ckptbest of dqn_pong_t0_s0_ckptbest if available
    '''
    prepath = prepath.strip('_')
    tail = prepath.split('data/')[-1]
    if '_ckpt' in tail:
        ckpt_chunk = ps.find(tail.split('_'), lambda s: s.startswith('ckpt'))
        tail = tail.replace(f'_{ckpt_chunk}', '')
        ckpt = ckpt_chunk.replace('ckpt', '')
    else:
        ckpt = None
    if '/' in tail:
        prefolder, prename = tail.split('/')
    else:
        prefolder, prename = tail, None
    predir = f'data/{prefolder}'
    spec_name = RE_FILE_TS.sub('', prefolder).strip('_')
    experiment_ts = RE_FILE_TS.findall(prefolder)[0]
    return predir, prefolder, prename, spec_name, experiment_ts, ckpt

def execute(self, database):
    SNAPSHOT_SOFTWARE = {
        'DDI': 'Workbench',
        'Scan': 'AndroidApp',
        'DeviceHubClient': 'Web'
    }
    for snapshot in DeviceEventDomain.get({'@type': "devices:Snapshot"}):
        with suppress(KeyError):
            snapshot['snapshotSoftware'] = SNAPSHOT_SOFTWARE[
                snapshot.get('snapshotSoftware', 'DDI')]
        DeviceEventDomain.update_one_raw(
            snapshot['_id'],
            {'$set': {'snapshotSoftware': snapshot['snapshotSoftware']}})
        for device in DeviceDomain.get({'events._id': snapshot['_id']}):
            materialized_snapshot = find(
                device['events'],
                lambda event: event['_id'] == snapshot['_id'])
            materialized_snapshot['snapshotSoftware'] = snapshot['snapshotSoftware']
            DeviceDomain.update_one_raw(
                device['_id'], {'$set': {'events': device['events']}})

def search_next(self, files, task_id, file_dir, curr_file, curr_num):
    # Search for the next chunk
    next_num = str(int(curr_num.split('-')[-1]) + 1)
    task_id_prefix = f'{task_id}.'
    if self.aios_redis.get(f'plus_uploader:{task_id}:{next_num}'):
        next_file = f'{task_id_prefix}{next_num}'
        if next_file in files:
            self.aios_print(f'Merging files (case A): [{curr_file}] => [{next_file}]')
            new_file = self.zip_region(
                f'{task_id_prefix}{curr_file.replace(task_id_prefix, "")}-{next_num}'
            )
            self.merge_part_files(file_dir, curr_file, next_file, new_file)
            curr_file = new_file
            _.remove(files, lambda x: x == next_file)
    else:
        # next chunk may already be part of a merged joint file
        next_file = _.find(files, lambda x: re.match(rf'.*\.{next_num}-\d+', x))
        if next_file:
            self.aios_print(f'Merging files (case B): [{curr_file}] => [{next_file}]')
            next_num_joint = next_file.replace(task_id_prefix, '')
            new_file = self.zip_region(
                f'{task_id_prefix}{curr_file.replace(task_id_prefix, "")}-{next_num_joint}'
            )
            self.merge_part_files(file_dir, curr_file, next_file, new_file)
            curr_file = new_file
            _.remove(files, lambda x: x == next_file)
    return curr_file

def process_path(path, request):
    default_language = settings.PARLER_DEFAULT_LANGUAGE_CODE
    prefix_default = settings.PREFIX_DEFAULT_LANGUAGE
    query = build_select_path_query(path)
    data, success, *_ = execute_query(query)
    path_item = py_.find(data, lambda item: item['lang'] == get_language())
    if not path_item and len(data):
        lang_code = data[0]['lang']
        protocol = request.scheme + '://'
        redirect = ''
        if lang_code == default_language and not prefix_default:
            redirect = request.get_host() + '/' + path
        else:
            redirect = request.get_host() + '/' + lang_code + '/' + path
        query = '?' + request.META.get('QUERY_STRING') if request.META.get(
            'QUERY_STRING') else ''
        return {'redirect': protocol + redirect + query}
    if not path_item:
        return {'not_found': True}
    return {
        'page_id': path_item.get('page_id'),
        'pattern': path_item.get('pattern')
    }

def test_basic_with_default_config(self):
    actual = get_field_mapping()
    self.assertIsInstance(actual, list)
    self.assertIsInstance(
        find(actual, lambda x: x["source"] == "url"),
        dict,
    )

def Read(self, device_id, profile):
    hit_1 = pydash.find(self._devices_list, {"hardwareID": device_id})
    if hit_1 is not None:
        hit_2 = pydash.find(hit_1["streams"], {"id": profile["id"]})
        if hit_2 is not None:
            return self.SerializeValue(hit_2["format"], hit_2["value"])
        else:
            ProtocolException(
                "Component not found (subscription to device ID) " + device_id)
            return bytearray([])
    else:
        ProtocolException(
            "Component not found (subscription to device ID) " + device_id)
        return bytearray([])

def find_ckpt(prepath):
    '''Find the ckpt-lorem-ipsum in a string and return lorem-ipsum'''
    if 'ckpt' in prepath:
        ckpt_str = ps.find(prepath.split('_'), lambda s: s.startswith('ckpt'))
        ckpt = ckpt_str.replace('ckpt-', '')
    else:
        ckpt = None
    return ckpt

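# Quick sanity check for find_ckpt under an assumed prepath naming scheme where
# the checkpoint chunk appears as 'ckpt-best' between underscores (the sample
# paths below are made up for illustration):
assert find_ckpt('data/dqn_pong/dqn_pong_t0_s0_ckpt-best') == 'best'
assert find_ckpt('data/dqn_pong/dqn_pong_t0_s0') is None
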
def test_Accelerometer(self):
    payload = "017104D2FB2E0000"
    b64 = codecs.encode(codecs.decode(payload, 'hex'), 'base64')
    cayenne = cayenne_parser.CayenneParser()
    out = cayenne.decodeCayenneLpp(b64, datetime.datetime.now())
    hit = pydash.find(out, {"id": "AccelerometerX"})
    self.assertTrue(hit)
    self.assertAlmostEqual(float(hit["value"]), 1.234, delta=0.1)
    hit = pydash.find(out, {"id": "AccelerometerY"})
    self.assertTrue(hit)
    self.assertAlmostEqual(float(hit["value"]), -1.234, delta=0.1)
    hit = pydash.find(out, {"id": "AccelerometerZ"})
    self.assertTrue(hit)
    self.assertAlmostEqual(float(hit["value"]), 0.0, delta=0.1)

def test_RelativeHumidity(self):
    payload = "016864"
    b64 = codecs.encode(codecs.decode(payload, 'hex'), 'base64')
    cayenne = cayenne_parser.CayenneParser()
    out = cayenne.decodeCayenneLpp(b64, datetime.datetime.now())
    hit = pydash.find(out, {"id": "Relative_Humidity"})
    self.assertTrue(hit)
    self.assertTrue(float(hit["value"]) == 50.0)

def test_Temperature(self):
    payload = "0167FFD7"
    b64 = codecs.encode(codecs.decode(payload, 'hex'), 'base64')
    cayenne = cayenne_parser.CayenneParser()
    out = cayenne.decodeCayenneLpp(b64, datetime.datetime.now())
    hit = pydash.find(out, {"id": "Temperature"})
    self.assertTrue(hit)
    self.assertTrue(float(hit["value"]) == -4.1)

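# For reference: a Cayenne LPP record is (channel, type, payload), and the
# temperature type 0x67 carries a signed 16-bit value in units of 0.1 degC, so
# the "0167FFD7" payload above is channel 0x01, type 0x67, 0xFFD7 = -41,
# i.e. -4.1 degC. A standalone check of that arithmetic, independent of
# cayenne_parser:
import struct

raw = bytes.fromhex("0167FFD7")
channel, lpp_type = raw[0], raw[1]
(tenths,) = struct.unpack('>h', raw[2:4])  # big-endian signed 16-bit
assert (channel, lpp_type, tenths / 10.0) == (0x01, 0x67, -4.1)
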
def generate_rating(self, attributes, pos_index, rating_type):
    """Generate a value between 0 and 1 for performance and retention."""
    position_name = POSITIONS[pos_index]
    weightings = WEIGHTINGS[rating_type][position_name]
    university_outcomes = pydash.find(
        UNIVERSITIES, ['name', attributes['university']])['outcomes']
    major_outcomes = pydash.find(
        MAJORS, ['name', attributes['major']])['outcomes']
    if rating_type == 'performance':
        return round((
            weightings['university'] * university_outcomes[position_name]['performance']
            + weightings['major'] * major_outcomes[position_name]['performance']
            + weightings['GPA'] * attributes['GPA'] / GPA['maximum']
        ), 2)
    elif rating_type == 'retention':
        return round((
            weightings['university'] * university_outcomes[position_name]['retention']
            + weightings['major'] * major_outcomes[position_name]['retention']
        ), 2)
    else:
        raise Exception(rating_type + " is not a rating_type option.")

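# generate_rating relies on pydash's [key, value] "matches_property" shorthand;
# the same lookup spelled out with made-up data (illustrative only):
import pydash

universities_demo = [
    {'name': 'State U', 'outcomes': {'engineer': {'performance': 0.7}}},
    {'name': 'Tech U', 'outcomes': {'engineer': {'performance': 0.9}}},
]
hit = pydash.find(universities_demo, ['name', 'Tech U'])
assert hit['outcomes']['engineer']['performance'] == 0.9
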
def get_plate_files(plate_name, cnt=-1):
    def parse_file_name(f):
        n = os.path.basename(f)
        parts = [f, n, plate_name] + NAME_PARSER.split(n)[1:6]
        if len(parts[6]) == 1:
            parts[6] = '0' + parts[6]
        frm = Frame._make(parts)
        return frm

    tree = LogHelper.time(lambda: [
        PathNode._make(t) for t in os.walk(ROOT_DIR + "\\data", topdown=False)])
    plate = pydash.find(tree, lambda p: p.root.endswith(plate_name))
    LogHelper.logText(plate.root)
    if plate.dirs:
        files = glob.glob(plate.root + "\\*\\*.tif")
    else:
        files = glob.glob(plate.root + "\\*.tif")
    if cnt > 0:
        files = random.sample(files, cnt)
    parsed = map(parse_file_name, files)
    return parsed

def test_find(case, expected):
    assert _.find(*case) == expected

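# A minimal harness for test_find, assuming pytest parametrization drives it
# (the cases are illustrative assumptions, not the suite's real fixtures):
import pydash as _
import pytest

@pytest.mark.parametrize('case, expected', [
    (([1, 2, 3, 4], lambda x: x >= 3), 3),                          # first match wins
    (([{'a': 1}, {'a': 2, 'b': 3}], {'b': 3}), {'a': 2, 'b': 3}),   # dict shorthand
    (([1, 2], lambda x: x > 9), None),                              # no match -> None
])
def test_find_sketch(case, expected):
    assert _.find(*case) == expected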