def main(scenes_file, vocab_type):
    data_prefix = scenes_file.split('.')[0]
    filename = f'{data_prefix}_{vocab_type}_vocab.json'
    # if filename is None:
    #     data_prefix = scenes_file.split('.')[0].split('_')[0]
    #     if data_prefix != '':
    #         filename = f'{data_prefix}_vocab.json'
    #     else:
    #         raise ValueError()
    get_set_func = {'attributes': get_attribute_names,
                    'objects': get_object_names}
    synset_file = {'attributes': 'attribute_synsets.json',
                   'objects': 'object_synsets.json'}
    all_synsets = json.load(open(synset_file[vocab_type], 'r'))
    scenes = json.load(open(scenes_file, 'r'))
    vocab = set()
    for scene_id, scene in scenes.items():
        vocab = vocab.union(get_set_func[vocab_type](scene))
    vocab_synsets = {}
    for word in vocab:
        try:
            synset = all_synsets[word]
        except KeyError:
            synset = None
        vocab_synsets[word] = synset
    json.dump(vocab_synsets, open(filename, 'w'), indent=4)
    print(f'Completed writing {filename}')
def test(self, start=False, port=5354):
    """
    js9 'j.servers.dns.test()'
    """
    if start or not self.ping(port=port):
        self.start(background=True, port=port)

    def ping():
        from gevent import socket
        address = ('localhost', port)
        message = b'PING'
        sock = socket.socket(type=socket.SOCK_DGRAM)
        sock.connect(address)
        print('Sending %s bytes to %s:%s' % ((len(message),) + address))
        sock.send(message)
        data, address = sock.recvfrom(8192)
        print('%s:%s: got %r' % (address + (data,)))
        assert data == b"PONG"

    ping()
    ns = j.tools.dnstools.get(["localhost"], port=port)
    print(ns.namerecords_get("google.com"))
    print(ns.namerecords_get("info.despiegk"))
def mentions(self, ids, year=None):
    ids = [ids] if isinstance(ids, str) else ids
    results = {}
    for key in ids:
        doi = key[4:] if key.lower().startswith('doi') else key
        params = {
            'mailto': self.mailto,
            'obj-id': doi,
            'rows': 0,
            # 'facet': 'source:*'
        }
        if year:
            params['from-occurred-date'] = '{}-01-01'.format(year)
            params['until-occurred-date'] = '{}-12-31'.format(year)
        try:
            response = requests.get(CROSSREF_EVENTS_URL, params=params)
            response.raise_for_status()
        except requests.exceptions.HTTPError:
            print('Error fetching events for {}'.format(key))
        else:
            if 'json' in response.headers['Content-Type']:
                info = response.json()
                results[key] = info['message'].get('total-results', 0)
    return results
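# A minimal usage sketch for mentions(), assuming an instance with a
# `mailto` attribute and CROSSREF_EVENTS_URL pointing at the Crossref Events
# API; the DOI below is a placeholder, not a real identifier:
# counts = client.mentions(['doi:10.1234/example'], year=2020)
# counts  # e.g. {'doi:10.1234/example': 42}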
def bookone():
    # fetch a single book
    params = {"id": 1}
    url = 'http://127.0.0.1:8000/books/'
    r = requests.get(url, params=params)
    print(r.json())
    return r
async def clusters_overview_handler(form: dict, request: 'Application.Request'):
    cloud_token = await auth(request, form["project_id"])
    headers = {"X-Auth-Token": cloud_token,
               "Content-Type": "application/json"}
    url = CCE_CLUSTERS_URL.format(project_id=form["project_id"])
    async with aiohttp.ClientSession() as session:
        async with session.get(url, headers=headers) as resp:
            print((resp.status, await resp.text()))
            resp = await resp.json()

    def inner():
        for item in resp["items"]:
            yield {
                "uid": item["metadata"]["uid"],
                "name": item["metadata"]["name"],
                "update_at": date_parse(
                    item["metadata"]["updateTimestamp"][:26]).timestamp(),
                "status": item["status"]["phase"],
                "flavor": item["spec"]["flavor"],
            }

    return web.json_response({"clusters": list(inner())})
async def aom_query_handler(form: dict, request: 'Application.Request'):
    form.setdefault("duration_sec", 5 * 60)
    cloud_token = await auth(request, form["project_id"])
    headers = {"X-Auth-Token": cloud_token,
               "Content-Type": "application/json"}
    url = AOM_METRIC_DATA_URL.format(project_id=form["project_id"])
    body = (BODY_OAM_METRIC_QUERY
            .replace("{{ METRIC_NAME }}", form["metric_name"])
            .replace("{{ CLUSTER_NAME }}", form["cluster_name"])
            .replace("{{ NAMESPACE }}", form["namespace"]))
    async with aiohttp.ClientSession() as session:
        print(url)
        async with session.post(url, headers=headers, data=body) as resp:
            resp = await resp.json()

    def inner():
        for point in resp["metrics"][0]["dataPoints"]:
            yield {
                "timestamp": point["timestamp"],
                "unit": point["unit"],
                "average": point["statistics"][0]["value"],
            }

    return web.json_response({"points": list(inner())})
def listen(self):
    # print("Listening")
    event = self.incoming_socket.poll(timeout=3000)  # wait 3 seconds
    if event == 0:
        # timeout reached before any events were queued
        pass
    else:
        # events queued within our time limit
        message = self.incoming_socket.recv_string()
        print("Message -> {}".format(message))
        # role, topic, ipaddr = self.message.split()
        data = json.loads(message)
        role = data['role']      # publisher
        topic = data['topic']    # 12345
        ipaddr = data['ipaddr']  # 10.0.0.1
        if ipaddr not in self.registry[topic][role]:
            # e.g. is 10.0.0.1 in registry['12345']['publisher']?
            self.registry[topic][role].append(ipaddr)  # now it's added to the list ['10....1']
        # based on our role, we need to find the companion ip addresses in the registry
        if role == 'publisher' or role == 'subscriber':
            if topic in self.topic_brokers:
                broker = self.topic_brokers[topic]
            else:
                for i in range(10):
                    try:
                        self.update_broker_registry('/broker')
                        # Choose a broker at random if we don't know the topic
                        broker = random.choice(self.registry['broker'])
                        self.topic_brokers[topic] = broker
                        # stop retrying once a broker has been chosen
                        # (the original kept looping all 10 iterations)
                        break
                    except:
                        time.sleep(0.1)
        if role == 'broker':
            broker = str(self.master_count)
        self.incoming_socket.send_string(broker)
def _get_single_statement_new(self, from_date, to_date):
    driver = self.driver
    params, target_url, user_agent = driver.execute_script("""
        return (function() {
            var form = document.forms[0];
            var es = [].map.call($(form).serializeArray(), function(e) {
                return [e.name, e.value];
            });
            return [es, form.getAttribute('action'), navigator.userAgent];
        })();
    """)
    params = dict(params)
    from pprint import pprint as print
    params.update({
        'frmTest:btn_Export': '',
        # note: the original formatted '{:%Y}' into the day field and
        # '{:%d}' into the '.year' field; the values appear swapped, so
        # they are corrected here to match the field names
        'frmTest:dtSearchToDate': '{:%d}'.format(to_date),
        'frmTest:dtSearchToDate.month': '{:%m}'.format(to_date),
        'frmTest:dtSearchToDate.year': '{:%Y}'.format(to_date),
        'frmTest:dtSearchFromDate': '{:%d}'.format(from_date),
        'frmTest:dtSearchFromDate.month': '{:%m}'.format(from_date),
        'frmTest:dtSearchFromDate.year': '{:%Y}'.format(from_date)
    })
    print(params)
    cookies = {
        c['name']: c['value']
        for c in driver.get_cookies()
    }
def main(min_num=1, max_num=51):
    """Main function to start the program

    Gathers the case information, case activity and case details from the
    website.

    Note that you will have to create a MySQL database with a table called
    Case containing these six columns: id (int, primary key),
    case_id (varchar), plaintiffs (varchar), defendants (varchar),
    filing_date (datetime) and case_activity (varchar, better be long).

    :param min_num: the min number to start with
    :param max_num: the max number to end with; note that you have to add
        one to the actual number you want because it is passed to range()
    """
    print('Getting case data...')
    start = time.time()
    # The worker pool
    pool = multiprocessing.Pool(10)
    # Submit all the tasks
    for i in range(min_num, max_num):
        pool.apply_async(spyder,
                         args=('1998', 'D', str(i).zfill(6), storeToDatabase))
    pool.close()
    pool.join()
    # # Start all threads
    # for t in tqdm(thread):
    #     t.start()
    # # Let the parent thread be idle before all its child threads are finished
    # for t in tqdm(thread):
    #     t.join()
    end = time.time()
    total = end - start
    print('Total time cost: %.2fs' % total)
def setUp(self):
    print("setUp")
    self.psys = PluginSystem()
    self.psys.scan()
    # self.psys.load('glyr')
    # self.psys.load('lastfm')
    print(self.psys.__dict__)
def load_podcasts(server, test):
    with open('.tenant.{0}.yml'.format(server), 'r') as tenant_yml:
        config = yaml.load(tenant_yml)
    client = PathgatherClient(config['pathgather_host'],
                              config['pathgather_token'])
    for podcast, name in PODCAST_URLS:
        feed = feedparser.parse(podcast)
        for m in feed.entries:
            try:
                if not test:
                    c = client.content.create(
                        name=m['title'],
                        content_type=pathgather.types.ContentType.MEDIA,
                        source_url=m['link'],
                        provider_id=name,
                        topic_name=None,
                        level=pathgather.types.SkillLevel.ALL,
                        custom_id=None,
                        description=m['summary'],
                        image=feed['feed']['image']['href'],
                        tags=None,
                        enabled=True,
                        skills=[tag['term'] for tag in m['tags']],
                        duration=m.get('itunes_duration', 0))
                    print("Added content {0}".format(c))
            except pathgather.exceptions.PathgatherApiException as p:
                print("Failed to create content: {0}".format(p.message))
def create(self, validated_data: Any):
    print(validated_data)
    # default to an empty list so the loop below is safe when no choices
    # were submitted (iterating over the original None default would crash)
    choices = validated_data.pop('choices', None) or []
    question = Question.objects.create(**validated_data)
    for choice in choices:
        question.choices.create(**choice)
    return question
def update_section(section, key, value):
    """
    Recurses into a nested dictionary or nested list and changes a value.

    Uses the "key" parameter as follows: "somekey-1-2" would update the 3
    in {"somekey": [10, [1, 2, 3]]}

    :param section: nested dictionary or list
    :param key: string of recurse keys
    :param value: value to set the searched value to.
    """
    splitted = key.split("-", 1)
    if len(splitted) == 1:
        if type(section) is list:
            key = int(key)
        if type(section[key]) is bool or value.lower() in ("true", "false", "on", "off"):
            section[key] = translate_bool(value)
        else:
            section[key] = type(section[key])(value)
        print(section[key])
    else:
        sectkey, key = splitted
        if type(section) is list:
            sectkey = int(sectkey)
        update_section(section[sectkey], key, value)
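# A minimal usage sketch for update_section(), assuming translate_bool() is
# in scope; the sample data is hypothetical:
# cfg = {"somekey": [10, [1, 2, 3]]}
# update_section(cfg, "somekey-1-2", "42")
# cfg is now {"somekey": [10, [1, 2, 42]]}; the existing value's type (int)
# is used to coerce the incoming string "42".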
def get_currency_data(api_key, fake=False, fakeonerror=False):
    if fake:
        return CURRENCIES
    else:
        url = "http://apilayer.net/api/live?access_key={}".format(api_key)
        r = j.tools.http.get(url)
        try:
            json_res = r.json()
            data = json_res["quotes"]
            data["USDETH"] = 1 / cc.get_price("ETH", "USD")["ETH"]["USD"]
            data["USDXRP"] = cc.get_price("USD", "XRP")["USD"]["XRP"]
            data["USDBTC"] = 1 / cc.get_price("BTC", "USD")["BTC"]["USD"]
            # note: the original used k.lower().lstrip("usd"), but lstrip
            # strips *characters*, not a prefix, so e.g. "usdsek" would
            # become "ek"; slicing off the three-character prefix is safe
            normalized_data = {
                k.lower()[3:]: v
                for k, v in data.items()
            }
            return normalized_data
        except Exception as e:
            print("error happened")
            if not fakeonerror:
                raise e
            else:
                return CURRENCIES
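# Why the slice rather than lstrip: str.lstrip("usd") strips any run of the
# characters 'u', 's', 'd' from the left, not the literal "usd" prefix.
assert "usdsek".lstrip("usd") == "ek"  # also eats the 's' of "sek"
assert "usdsek"[3:] == "sek"           # slicing keeps the currency code intact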
def cleanup():
    """
    Stops the create_ap service on exit.
    """
    # stop the access point when we exit.
    os.system("systemctl stop create_ap.service")
    print("create_ap stopped gracefully")
def capture_preview(serialnumber: str) -> bool:
    """
    Captures a preview image once.

    todo: see :func:`cap_lock_wait`

    :param serialnumber:
    :return: whether the capture was a success
    :rtype: bool
    """
    try:
        a = subprocess.check_output("gphoto2 --auto-detect", shell=True).decode()
        for port in re.finditer("usb:", a):
            port = a[port.start():port.end() + 7]
            cmdret = subprocess.check_output(
                'gphoto2 --port "' + port + '" --get-config serialnumber',
                shell=True).decode()
            _serialnumber = cmdret[cmdret.find("Current: ") + 9:len(cmdret) - 1]
            if _serialnumber == serialnumber:
                tries = 0
                while tries < 10 and cap_lock_wait(port, serialnumber):
                    tries += 1
                    time.sleep(1)
                return True
    except subprocess.CalledProcessError as e:
        print(str(e))
    return False
async def get_token_by_credentials(
        login: str,
        password: str,
        domain_name: str,
        project_id: str = '',
        *,
        scope: Literal["project", "domain"] = "project"
) -> tuple[tuple[Optional[str], Optional[datetime.datetime]], Optional[str]]:
    body = (BODY_LOGIN
            .replace("{{ LOGIN }}", login)
            .replace("{{ PASSWORD }}", password)
            .replace("{{ DOMAIN_NAME }}", domain_name)
            .replace("{{ SCOPE }}",
                     project_id if scope == "project" else domain_name)
            .replace("{{ SCOPE_LABEL }}", scope)
            .replace("{{ SCOPE_TARGET }}",
                     "id" if scope == "project" else "name"))
    print(body)
    async with aiohttp.ClientSession() as session:
        async with session.post(TOKEN_URL, data=body) as resp:
            if "X-Subject-Token" not in resp.headers:
                logging.info(
                    f"invalid credentials ({login = }, {password = }, "
                    f"{domain_name = }, {project_id = }, {scope = })")
                # we were handed bogus credentials
                return (None, None), 'Invalid credentials. Check logs'
            cloud_token: str = resp.headers["X-Subject-Token"]
            expires_at: datetime.datetime = date_parse(
                (await resp.json())["token"]["expires_at"])
            return (cloud_token, expires_at), None
def capture_view():
    user_form = UserInputForm(request.form)
    try:
        lookup = [(idx, x) for idx, x in
                  enumerate(session["camera_config"]["image_size_list"])]
        reverselookup = dict([(str(v), k) for k, v in lookup])
        user_form.image_size.choices = lookup
    except:
        pass
    if user_form.validate() and user_form.submit.data:
        print(user_form.data)
        for k in [x for x in vars(user_form)
                  if not x.startswith("_") and not x == "meta"]:
            if k == "image_size":
                session["image_width"], session["image_height"] = user_form[k].data
            session[k] = user_form[k].data
    for k, v in session.items():
        try:
            user_form[k].data = v
        except:
            pass
    try:
        user_form.image_size.data = reverselookup[str(session.get("image_size"))]
    except:
        pass
    user_editable = ["1st_corner", "2nd_corner", "overlap", "scan_order",
                     "image_height", "image_width"]
    template_data = {"form": user_form}
    return render_template("capture.html", **template_data)
def sort_followers(db_name):
    # f = open('data/alltime.txt', 'w')
    db = pymongo.MongoClient('127.0.0.1:27017').get_database(db_name)
    for colname in q_colls:
        collection = db.get_collection(colname)
        for qdoc in collection.find():
            if qdoc['qid'] not in qids:
                continue
            print(qdoc['qid'])
            followers = qdoc['follower']
            follower_ids = set()
            if len(followers) <= 1:
                continue
            # remove duplicates
            distinct_followers = []
            for follow in followers:
                if follow['uid'] not in follower_ids:
                    follower_ids.add(follow['uid'])
                    distinct_followers.append(follow)
            followers = distinct_followers
            for index, follow in enumerate(followers):
                print(index)
                if follow['time'] is None:
                    # ignore the None entries for now; interpolate the
                    # timestamp from the neighbouring entries instead
                    low = min(followers[index-1]['time'], followers[index+1]['time'])
                    high = max(followers[index-1]['time'], followers[index+1]['time'])
                    follow['time'] = low + (high - low) / 2
            collection.update(
                {'qid': qdoc['qid']},
                {'$set': {'follower': followers}}
            )
def stream_image(fn):
    """Video streaming generator function."""
    while True:
        frame = ""
        try:
            img = cv2.imread(fn, cv2.IMREAD_COLOR)
            b, frame = cv2.imencode(".jpg", img)
            frame = frame.tostring()
            # with open(fn, 'rb') as f:
            #     frame = f.read()
        except Exception as e:
            print(str(e))
            img = Image.new("RGB", (512, 256))
            draw = ImageDraw.Draw(img)
            pos = float(time.time() * 1000) / 1000
            pos = (1 + math.sin(pos)) / 2
            pos = pos * (img.size[0] - 50)
            draw.multiline_text((pos, img.size[1] / 2), "NO IMAGE", fill="white")
            del draw
            b = io.BytesIO()
            img.save(b, "JPEG")
            b.seek(0)
            frame = b.read()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
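# A sketch of how a generator like this is commonly wired into a Flask route
# (the route name, app object and filename are assumptions, not taken from
# the source); the boundary string must match the b'--frame' marker yielded
# above:
# @app.route('/video_feed')
# def video_feed():
#     return Response(stream_image('latest.jpg'),
#                     mimetype='multipart/x-mixed-replace; boundary=frame')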
def check_Login(self):
    if self.isLoggedIn:
        print('user already logged in!')
        res = input('does the user need to log out? ')
        if res == "1":
            self.logout()
    return
def scrap_normal(x):
    for _ in range(tables[2].shape[0]):
        try:
            updatedict(int(tables[2].iloc[x, 1]),
                       int(tables[2].iloc[x, 2]),
                       tables[2].iloc[x, 3])
        except:
            print("Error: Format Error")
        x += 1
def handle_dead_lock(root, path, old_tree_paths, handled_paths, op_ls):
    # path not in old_tree_paths with entity == -1
    # otherwise +1
    assert path in old_tree_paths
    assert path not in handled_paths
    leaf = get_node(root, path)
    try:
        insert(leaf, Exception)  # must fail
    except NeedTrinodeRestructorError as e:
        # fail
        # bug:
        #   follow black_uncle contains the new node with entity Exception!
        black_uncle_with_modified_parent, = e.args
        black_uncle_path = get_attr_path(black_uncle_with_modified_parent)
        black_uncle = get_node(root, black_uncle_path)
        begin = len(op_ls)
        red_uncle = to_red(black_uncle, old_tree_paths, op_ls)
        another_root = red_uncle.fixed_the_only_broken_above_until_root()
        root = apply_op_ls(root, op_ls[begin:])
        try:
            assert root.underlying == another_root.underlying
        except:
            from pprint import pprint as print
            print(root.underlying[0])
            print(another_root.underlying[0])
            raise
        new_inserted_paths = set(path for _, _, path in op_ls[begin:])
        new_inserted_old_paths = old_tree_paths & new_inserted_paths
        return new_inserted_old_paths, root, black_uncle_path
    else:
        raise ...
def run_one():
    bag_dict = get_bags()
    bag_dict = {
        bag: content is not None and tuple(
            (content_bag[1] for content_bag in content)) or None
        for bag, content in bag_dict.items()
    }
    bags = collections.deque(bag_dict.keys())
    while bags:
        bag = bags.popleft()
        content = bag_dict[bag]
        if content is None:
            pass
        elif 'shiny gold' in content:
            bag_dict[bag] = True
        else:
            if any((bag_dict[content_bag] == True for content_bag in content)):
                bag_dict[bag] = True
            else:
                content = tuple((content_bag for content_bag in content
                                 if bag_dict[content_bag] is not None))
                if content:
                    bag_dict[bag] = content
                    bags.append(bag)
                else:
                    bag_dict[bag] = None
    bags = (bag for bag, content in bag_dict.items() if content == True)
    print(len(list(bags)))
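# A hypothetical sketch of the input this fixpoint loop expects (shaped like
# the Advent of Code 2020 day 7 "Handy Haversacks" puzzle): get_bags() maps
# a bag colour to its (count, colour) content pairs, or None for an empty
# bag, e.g.
# get_bags() -> {'bright white': ((1, 'shiny gold'),), 'faded blue': None}
# After the loop, every entry that can (transitively) reach 'shiny gold'
# holds True, and the final print counts those entries.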
def run(self):
    print("Running")
    while True:
        # print("Listen - Check registry - Check Load")
        self.listen()
        self.check_registry()
        self.check_load()
def daily_test():
    """
    Downloads every sensor and runs the full test on every one.
    """
    api_data = scraper.get_sensors_with_obs_type()
    print("got sensors, beginning to run tests on each sensor")
    sensors = pd.DataFrame(api_data)
    out = sensors.apply(full_sensor_test, axis=1,
                        test_delta_array=[1 / 24, 1, 3], save_plots=True)
    # break apart the output and save it in the dataframe
    sensors["train_residuals"] = out.apply(lambda x: x[0] if x else None)
    sensors["test_residuals_1hour"] = out.apply(lambda x: x[1][0] if x else None)
    sensors["test_residuals_1day"] = out.apply(lambda x: x[1][1] if x else None)
    sensors["test_residuals_3days"] = out.apply(lambda x: x[1][2] if x else None)
    sensors["num_test_vals"] = out.apply(lambda x: x[2] if x else 0)
    sensors["flag_1hour"] = out.apply(lambda x: x[3][0] if x else False)
    sensors["flag_1day"] = out.apply(lambda x: x[3][1] if x else False)
    sensors["flag_3days"] = out.apply(lambda x: x[3][2] if x else False)
    sensors["flag_min_vals"] = out.apply(lambda x: x[3][3] if x else False)
    sensors = sensors.sort_values("num_test_vals", ascending=False)
    sensors["run_date"] = datetime.datetime.utcnow()
    return sensors
def booksadd():
    # add a book (the original comment said "query all books", but this
    # POSTs new book data)
    url = 'http://127.0.0.1:8004/books/'
    data = {"btitle": "射雕英雄传1", "bpub_date": "1980-05-01"}
    data = json.dumps(data)  # serialise the dict to JSON
    r = requests.post(url, data=data)
    print(r.json())
    return r
def spyder(case_year, division_code, case_id, store_to=None, print_out=True):
    """Get the case detail and store it

    :param case_year: str, the year of the given case, will be passed to
        `getCaseDetails`
    :param division_code: str, the division code of the given case, will be
        passed to `getCaseDetails`
    :param case_id: str, the id of the given case, will be passed to
        `getCaseDetails`
    :param store_to: function or method, will be run after it gets the
        result. If None, the store step is skipped. Defaults to None
    :param print_out: bool, whether to print the result to the terminal or
        not. Defaults to True
    :return: int, 0 for success, 1 for failure
    """
    try:
        # Fetch the result
        _ = getCaseDetails(case_year, division_code, case_id)
        # If the result is not empty (i.e. it was found)
        if _ != {}:
            # Append it to the global result list
            results.append(_)
            if print_out:
                print(_)
        elif print_out:
            # Otherwise, it means not found / other reasons
            print('Not found: %s%s%s' % (case_year, division_code, case_id))
        # Check the store_to param
        if store_to is not None:
            # Store it
            store_to(_)
        # Success
        return 0
    except:
        # Failed
        # raise
        return 1
def run_one():
    group_declarations = get_declarations()
    group_declarations = (declaration.replace('\n', '')
                          for declaration in group_declarations)
    group_declarations = map(set, group_declarations)
    group_declarations = map(len, group_declarations)
    print(sum(group_declarations))
def debug(self, *args, **kwargs):
    op = self._peek()
    if has_implement_protocol(op, 'debug'):
        op.debug(*args, **kwargs)
    else:
        print(self.to_data())
    return self
def path_midpoint(self, start, end, route_index=0):
    start, end = self.locations[start], self.locations[end]
    if start == end:
        # start is already the resolved location here (the original
        # re-indexed self.locations with it)
        return start
    routes = self.client.directions(start, end)
    # clamp to the last available route; the original min(len(routes), ...)
    # could index one past the end
    route = routes[min(len(routes) - 1, route_index)]['legs'][0]
    steps = route['steps']
    print(route.keys())
    route_length = route['distance']['value']
    distance_traveled = 0
    steps_taken = [{'distance': {'value': 0}}]
    while distance_traveled + steps_taken[-1]['distance']['value'] \
            < route_length / 2:
        distance_traveled += steps_taken[-1]['distance']['value']
        steps_taken.append(steps.pop())
        print(distance_traveled, route_length, len(steps))
    mid_step = steps_taken[-1]
    polyline = googlemaps.convert.decode_polyline(mid_step['polyline']['points'])
    polyline_points = [(p['lat'], p['lng']) for p in polyline]
    traveled_points = []
    while distance_traveled < route_length / 2:
        traveled_points.append(polyline_points.pop())
        if len(traveled_points) > 1:
            arg = traveled_points[-1] + traveled_points[-2] \
                + (EARTH_RADIUS,) + (True,)
            distance_traveled += math.great_circle_dist(*arg) * 1000
    args = traveled_points[-1] + traveled_points[-2] + (0.5,) + (True,)
    return math.great_circle_fraction(*args)
def largestSumOfAverages(self, A, K):
    """
    :type A: List[int]
    :type K: int
    :rtype: float
    """
    if K == len(A):
        return sum(A)
    # initial
    # the averages are needed over and over, so pre-calculate all of them
    average = [[0] * (len(A) + 1) for _ in range(len(A))]
    for i in range(len(A)):
        for j in range(i + 1, len(A) + 1):
            average[i][j] = sum(A[i:j]) / (j - i)
    if K == 1:
        return average[0][len(A)]
    # tabulation: dp[j] holds the best solution of the sub-problem
    # "split A[:j + 1] into k groups"
    dp = copy(average[0][1:])
    print(dp)
    for k in range(1, K - 1):
        for j in range(len(A) - 1, k - 1, -1):
            if j == k:
                dp[j] = sum(A[:j + 1])
            else:
                dp[j] = max(dp[i] + average[i + 1][j + 1]
                            for i in range(k - 1, j))
        print(dp)
    # return
    return max(dp[i] + average[i + 1][len(A)]
               for i in range(K - 2, len(A) - 1))
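# A quick sanity check against the standard LeetCode 813 example: splitting
# A = [9, 1, 2, 3, 9] into K = 3 groups as [9], [1, 2, 3], [9] scores
# 9 + 2 + 9 = 20, the best achievable value:
# Solution().largestSumOfAverages([9, 1, 2, 3, 9], 3)  # -> 20.0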
async def eye_query_handler(form: dict, request: 'Application.Request'):
    form.setdefault("duration_sec", 5 * 60)
    cloud_token = await auth(request, form["project_id"])
    headers = {"X-Auth-Token": cloud_token}
    url = EYE_METRIC_DATA_URL.format(project_id=form["project_id"])
    async with aiohttp.ClientSession() as session:
        print(url)
        async with session.get(
                url,
                headers=headers,
                params=(params := {
                    "namespace": form["namespace"],
                    "metric_name": form["metric_name"],
                    "from": 1000 * int(time.time() - form["duration_sec"]),
                    "to": 1000 * int(time.time()),
                    "period": best_period(form["duration_sec"]),
                    "filter": "average",  # todo: average only?
                    "dim.0": await metric_name_to_dim(form["metric_name"],
                                                      form["project_id"],
                                                      cloud_token),
                })) as resp:
            print(params)
            return web.json_response(await resp.json())
def dump_into_collector(
        baton, address, date, logs_port, logs_query, stats_port,
        stats_query, stats_database, stats_precision, last_collector):
    baton.change_collector(
        address, logs_port, logs_query, stats_port,
        stats_query, stats_database, stats_precision)
    jobs = set(baton.refresh().list_jobs())
    arguments = datetime.fromtimestamp(date / 1000).strftime(DATE_FORMAT).split()
    statuses = {
        'send_stats': 'Not installed',
        'send_logs': 'Not installed',
    }
    print('The send_stats and send_logs jobs will be launched to dump '
          'collected data from Agent to Collector {}'.format(address))
    for job in statuses:
        if job in jobs:
            status = 'Not Started'
            job_id = baton.refresh().start_job_instance(job, -1, 0, 0,
                                                        arguments, 'now')
            while True:
                status = baton.refresh().status_job_instance(job, job_id)
                print('Job {} is {}'.format(job, status))
                if status not in ('Running', 'Scheduled'):
                    break
                time.sleep(2)
            statuses[job] = status
    baton.refresh().change_collector(last_collector, **COLLECTOR_DEFAULTS)
    return statuses
def run(self):
    username, password = self.shuru()
    uid, token = self.denglu(username, password)
    start = time.time()
    self.zuanshi(uid, token)
    end = time.time()
    print("Response time: " + str(end - start))
def sigma0(self):
    max_dist = 0
    for i in self.locations:
        i = np.array(i)
        for j in self.locations:
            j = np.array(j)
            dist = sum((i - j) ** 2) ** 0.5
            if dist > max_dist:
                max_dist = dist
    print(max_dist)
    return 1 / 30 * max_dist
def search_by_songs(name):
    params = {
        's': name,
        'type': 1,
        'offset': 0,
        'limit': 10
    }
    params = parse.urlencode(params).encode('utf8')
    # note: the original called .readall(), which modern http responses do
    # not provide; .read() is the supported call
    resp = request.urlopen(search_url, params).read()
    data = json.loads(resp.decode())
    print(data)
def dataRoute(r, s, n):
    queue = [(1000, [(r, r)])]
    paths = []
    while queue:
        print(len(queue))
        m, p = queue.pop()
        for i, N in enumerate(n[p[-1][1]]):
            if N:
                if i == s:
                    paths.append((min(m, N), p + [(p[-1][1], i)]))
                else:
                    queue.append((min(m, N), p + [(p[-1][1], i)]))
    print(paths)
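# A hypothetical trace, assuming n is an adjacency/capacity matrix, r the
# source index and s the sink: each queue entry carries the bottleneck
# capacity seen so far plus the list of traversed edges, e.g.
# dataRoute(0, 2, [[0, 5, 0], [0, 0, 3], [0, 0, 0]])
# reaches the sink via 0 -> 1 -> 2 and records bottleneck
# min(1000, 5, 3) = 3 for that path.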
def construct_template_info(repo_path, setup_info, user_config=None,
                            setup_deps=None):
    if setup_deps is None:
        setup_deps = {}
    if user_config is None:
        user_config = {}
    template_info = {}
    template_info['packagename'] = user_config.get('name',
                                                   setup_info.get('name', ''))
    print('setuppy_path = %s' % repo_path)
    version_string = git.git_describe(repo_path, 'v', '.post')
    import subprocess
    if '.post' in version_string:
        full_hash = subprocess.check_output(['git', "rev-parse", "HEAD"],
                                            cwd=repo_path).decode()
        template_info['source_rev'] = full_hash
    else:
        template_info['source_rev'] = version_string
    template_info['packageversion'] = version_string
    if version_string:
        template_info['git'] = True
    # allow the user to overwrite the source url
    template_info['source_url'] = user_config.get('url',
                                                  setup_info.get('url', ''))
    # allow the user to overwrite the build number
    template_info['build_number'] = user_config.get('build_number', 0)
    build_requirements = setup_deps.get('required', {})
    # remove blacklisted packages from the build
    build_requirements = [
        dep for dep in build_requirements
        if dep not in user_config.get('blacklist_packages', [])]
    # add the build deps to the template info
    template_info['build_requirements'] = build_requirements
    if 'numpy' in build_requirements:
        build_string = NPY_BUILD_STRING
    else:
        build_string = PY_BUILD_STRING
    # allow the user to overwrite the build string
    template_info['build_string'] = user_config.get('build_string', build_string)
    # allow the user to overwrite the home url
    template_info['home_url'] = user_config.get('home_url',
                                                template_info['source_url'])
    template_info['license'] = setup_info.get('license', '')
    return template_info
def find_test_imports(importable_lib_name, iterable_of_deps_tuples):
    print('finding test imports')
    paths = []

    def into_importable(full_module_path, lib_name):
        relative_module_path = full_module_path.split(lib_name, maxsplit=1)[1]
        # trim the '.py'
        relative_module_path = relative_module_path[:-3]
        # add the library name back in
        relative_module_path = lib_name + relative_module_path
        # turn the string from path/to/module to path.to.module
        importable_path = '.'.join(relative_module_path.split(os.sep))
        # turn imports from 'foo.bar.baz.__init__' into 'foo.bar.baz'
        if importable_path.endswith('__init__'):
            importable_path = importable_path[:-9]
        return importable_path

    all_imports = [into_importable(full_path, importable_lib_name)
                   for mod_name, full_path, catcher in iterable_of_deps_tuples]
    return sorted(all_imports)
def extract_keywords(sentence):
    sentence = sentence.lower()
    not_stopw = ["no", "nor", "not", "over", "under", "again", "further",
                 "but", "against", "too", "very"]
    stopw = stopwords.words('english')
    for x in not_stopw:
        stopw.remove(x)
    print(stopw)
    pattern = re.compile(r'\b(' + r'|'.join(stopw) + r')\b\s*')
    sentence = sentence.replace('\n', '')
    sentence = sentence.replace("n't", " not")
    sentence = clean_string(sentence)
    sentence = pattern.sub('', sentence)
    stemmer = Stemmer()
    s = [stemmer.stem(w) for w in sentence.split()]
    # build bigrams by zipping the token list with itself shifted by one
    b = zip(*[s[i:] for i in [0, 1]])
    b = [bigram[0] + " " + bigram[1] for bigram in b]
    return s + b
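# A self-contained illustration of the bigram trick used above: zipping the
# token list with itself shifted by one pairs every token with its successor
# (the tokens here are made up for the example).
tokens = ['not', 'good', 'at', 'all']
bigrams = [a + " " + b for a, b in zip(*[tokens[i:] for i in [0, 1]])]
print(bigrams)  # ['not good', 'good at', 'at all']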
def overlap(self, source1, source2, sink):
    source1 = self.locations[source1]
    source2 = self.locations[source2]
    sink = self.locations[sink]
    from pprint import pprint as print
    try:
        poly1 = self.client.directions(source1, sink)[0]['overview_polyline']
        poly2 = self.client.directions(source2, sink)[0]['overview_polyline']
    except IndexError:
        return []
    poly1 = googlemaps.convert.decode_polyline(poly1['points'])
    print(poly1)
    poly2 = googlemaps.convert.decode_polyline(poly2['points'])
    overlap = [(i['lat'], i['lng']) for i in poly1 if i in poly2]
    return overlap
def set(corner):
    if corner in [1, 2]:
        try:
            pt = get_pan_tilt()
        except Exception as e:
            print(str(e))
            return jsonify(messages=[{
                "message": "Couldn't access the url: {}".format(
                    str(e).replace("<", "").replace(">", "")),
                "classsuffix": "danger"}])
        if corner == 1:
            session["1st_corner"] = pt
            return jsonify(messages=[{
                "message": "Set first corner to {},{}".format(*pt),
                "classsuffix": "success"}])
        elif corner == 2:
            session["2nd_corner"] = pt
            return jsonify(messages=[{
                "message": "Set second corner to {},{}".format(*pt),
                "classsuffix": "success"}])
    else:
        return jsonify(messages=[{"message": "mystery corner?",
                                  "classsuffix": "danger"}])
def test_load_by_name(self):
    print("test_load_by_name")
    # print(self.psys._module_list)
    self.psys.load('glyr')
    self.psys.load('lastfm')
    print(self.psys._loaded_list)
    print(len(self.psys._loaded_list))
    self.assertTrue(len(self.psys._loaded_plugins) == 2)
def index(request):
    template = loader.get_template('systran/index.html')
    if request.method == 'POST':
        form = OpinionForm(request.POST)
        if form.is_valid():
            try:
                sentence = form.cleaned_data['sentence']
                dic = read_dic('dic_98')
                s = extract_keywords(sentence)
                for word in s:
                    print(word)
                    try:
                        dic[word] += 1
                    except:
                        pass
                l = []
                with open('test', 'w') as f:
                    f.write("0")
                    i = 1
                    for key, value in dic.items():
                        if value != 0:
                            print('{} ({}) : {}'.format(key, i, value))
                            f.write(" {}:{}".format(i, value))
                        i += 1
                    f.write('\n')
                call(["./svm_classify", "test", "model", "tmp"])
                with open('tmp', 'r') as r:
                    pred = r.readline()
                template = loader.get_template('systran/results.html')
                context = RequestContext(request, {'sentence': sentence,
                                                   'prediction': pred,
                                                   'keywords': s})
                return HttpResponse(template.render(context))
            except Exception as e:
                print(e)
                return HttpResponseRedirect('/systran/error')
        else:
            return HttpResponseRedirect('/systran/error')
    else:
        form = OpinionForm()
        context = RequestContext(request, {'form': form})
        return HttpResponse(template.render(context))
def play(sid):
    detail_url = r"http://music.163.com/api/song/detail/?id={0}&ids=['{0}']".format(sid)
    # note: the original called .readall(), which modern http responses do
    # not provide; .read() is the supported call
    resp = request.urlopen(detail_url).read()
    data = json.loads(resp.decode())
    try:
        mp3_url = data['songs'][0]['mp3Url']
        name = data['songs'][0]['name']
        print("Start buffering....")
        data = request.urlopen(mp3_url).read()
        try:
            f = open(name + '.mp3', "wb")
            f.write(data)
            f.close()
            print("Downloaded successfully!")
            return name + '.mp3'
        except:
            pass
    except:
        pass
    print('Cache Failed!')
    return False
def play(sid):
    detail_url = r"http://music.163.com/api/song/detail/?id={0}&ids=['{0}']".format(sid)
    # note: the original called .readall(), which modern http responses do
    # not provide; .read() is the supported call
    resp = request.urlopen(detail_url).read()
    data = json.loads(resp.decode())
    # try:
    mp3_url = data['songs'][0]['mp3Url']
    name = data['songs'][0]['name']
    print("Start buffering.......")
    data = request.urlopen(mp3_url).read()
    # try:
    f = open(name + '.mp3', "wb")
    f.write(data)
    f.close()
    print("Downloaded successfully!")
    print(f)
    # os.startfile(name + '.mp3')
    sound = pyglet.media.load(name + '.mp3', streaming=True)
    sound.play()
    pyglet.app.run()
    return name + '.mp3'
def execute(args, parser):
    """To match the API of conda-build"""
    print("I'm supposed to make a meta.yaml file now.")
    print('args = %s' % args)
    execute_programmatically(args.skeletor_config, args.source,
                             args.output_dir)
        .replace('\\r', '\r')\
        .replace('\\\\', '\\')


filename = sys.argv[1]
repos = defaultdict(list)
with open(filename, encoding='UTF-8') as csv_file:
    reader = csv.reader(csv_file, quoting=csv.QUOTE_ALL)
    for row in reader:
        try:
            repo, sha, time_str, message_raw, status = row
        except ValueError as e:
            # Unexpected: pprint the offending row
            from pprint import pprint as print
            print(row)
            raise e
        time = datetime.fromtimestamp(int(time_str) // 10**6)
        message = unescape_message(message_raw)
        tokens = tokenize(message)
        commit = Commit(repo=repo, sha=sha, time=time, message=message,
                        tokens=tokens, status=status)
        if commit.is_valid:
            repos[commit.repo].append(commit)

with open('commits.pickle', 'wb') as pickle_file:
    pickle.dump(repos, pickle_file)
# the domain with `/` and the parameters are separated from the path
# with `?`. If there are multiple parameters they are separated from
# each other with a `&`.
#
# For example, we can define the domain and path of the collections URL
# as follows:

# +
museum_domain = 'https://www.harvardartmuseums.org'
collection_path = 'browse'

collection_url = (museum_domain + "/" + collection_path)

print(collection_url)
# -

# Note that we omit the parameters here because it is usually easier to
# pass them as a `dict` when using the `requests` library in Python.
# This will become clearer shortly.
#
# Now that we've constructed the URL we wish to interact with we're ready
# to make our first request in Python.

# +
import requests

collections1 = requests.get(
    collection_url,
    params = {'load_amount': 10,
# +
from lxml import html

floor_plan = requests.get('https://www.harvardartmuseums.org/visit/floor-plan')
floor_plan_html = html.fromstring(floor_plan.text)
# -

# 3. Use the `XPath` you identified in step one to select the HTML list item
#    containing level one information.
level_one = floor_plan_html.xpath('/html/body/main/section/ul/li[5]/div[2]/ul')[0]

# 4. Use a *for loop* or *list comprehension* to iterate over the
#    sub-elements of the list item you selected in the previous step and
#    extract the text from each one.
print([element.text_content() for element in level_one])

# 5. Bonus (optional): Extract the list of facilities available on each level.

# +
level_html = floor_plan_html.xpath('/html/body/main/section/ul/li')

level_info = [[element.text_content() for element in level.xpath('div[2]/ul')[0]]
              for level in level_html]

print(level_info)
    if retweeted(tweet):
        return source_status_id(tweet) in ebooks_retweet_ids or ('@' + screen_name in tweet.text)
    else:
        return (
            tweet.text.strip().startswith(('@', '.', 'RT'))
            or tweet.id < max_ebooks_id
            or tweet.created_at < max_ebooks_timestamp
            or tweet.text in ebooks_tweet_ids
        )

# build a map of old tweet ids to new tweet ids
recent_source_tweets = sorted([tweet for tweet in source_tweets
                               if not old_tweet(tweet)],
                              key=lambda t: t.created_at)

if '--dry-run' in sys.argv:
    for tweet in recent_source_tweets:
        if retweeted(tweet):
            print("retweet {}, #{}".format(tweet.text, source_status_id(tweet)))
        else:
            print("tweet:{} irt:{}".format(
                tweet.text, new_tweet_ids.get(tweet.in_reply_to_status_id)))
else:
    for tweet in recent_source_tweets:
        if retweeted(tweet):
            status = api.retweet(id=source_status_id(tweet))
        else:
            text = html_parser.unescape(tweet.text)
            status = api.update_status(
                status=text,
                in_reply_to_status_id=new_tweet_ids.get(tweet.in_reply_to_status_id))
        new_tweet_ids[tweet.id] = status.id
def execute_programmatically(skeletor_config_path, source_path, output_dir):
    with open(skeletor_config_path) as f:
        skeletor_config = yaml.load(f.read())
    print('loaded config = %s' % skeletor_config)
    # find the dependencies for all modules in the source directory
    repo_deps = list(depfinder.iterate_over_library(source_path))
    # Compile the regexers listed in the conda-skeleton.yml
    test_regexers = [re.compile(reg)
                     for reg in skeletor_config.get('test_regex', [])]
    ignore_path_regexers = [re.compile(reg)
                            for reg in skeletor_config.get('ignore_path_regex', [])]
    include_path_regexers = [re.compile(reg)
                             for reg in skeletor_config.get('include_path_regex', [])]
    extra_setup_regexers = [re.compile(reg)
                            for reg in skeletor_config.get('extra_setup_files_regex', [])]
    print('extra_setup_regexers')
    print(extra_setup_regexers)
    ignored, without_ignored = split_deps(repo_deps, ignore_path_regexers)
    setup_files, _ = split_deps(repo_deps, extra_setup_regexers)
    included, without_included = split_deps(without_ignored,
                                            include_path_regexers)
    tests, without_tests = split_deps(included, test_regexers)

    def find_lib_name(without_tests):
        """Helper function to find the library name from all non-test modules

        Parameters
        ----------
        without_tests : iterable of (mod_name, full_path, ImportCatcher) tuples
            Pass in the second return argument from `split_deps`

        Returns
        -------
        str
            The name of the library (hopefully!)
        """
        non_test_paths = [full_path
                          for mod_name, full_path, catcher in without_tests]
        common_path = os.path.commonprefix(non_test_paths)
        return common_path.strip(os.sep).split(os.sep)[-1]

    importable_lib_name = find_lib_name(without_tests)
    skeletor_config['blacklist_packages'].append(importable_lib_name)
    # find the runtime deps
    runtime_deps = get_runtime_deps(
        without_tests,
        blacklisted_packages=skeletor_config.get('blacklist_packages')
    )
    print('runtime_deps')
    print(runtime_deps)
    # find the testing time deps
    test_requires = get_runtime_deps(
        tests,
        blacklisted_packages=skeletor_config.get('blacklist_packages')
    )
    print('test time deps')
    print(test_requires)
    setup_info = None
    for mod_name, full_path, mod_deps in without_ignored:
        if 'setup.py' in full_path:
            setup_info = (mod_name, full_path, mod_deps)
    if setup_info:
        try:
            setup_info_dict = setup_parser.parse(setup_info[1])
            print('setup_info\n')
            print(setup_info_dict)
        except IndexError:
            # Occurs when setup.py has a call like setup(**kwargs). Looking at
            # you pims...
            setup_info_dict = {}
    else:
        print("No setup.py file found. Is this a python project?")
        raise
    # grab the code out of the setup.py file to find its deps so that I can
    # parse the code for imports
    setup_deps = None
    code = ''
    for mod_name, full_path, catcher in setup_files:
        with open(full_path, 'r') as f:
            code += f.read()
    setup_deps = depfinder.get_imported_libs(code).describe()
    print('setup_deps')
    print(setup_deps)
    # create the git repo path so that I can determine the version string from
    # the source code
    git_repo_path = os.path.dirname(setup_info[1])
    template_info = construct_template_info(git_repo_path, setup_info_dict,
                                            setup_deps=setup_deps,
                                            user_config=skeletor_config)
    # remove self-references
    try:
        runtime_deps.remove(template_info['packagename'])
    except ValueError:
        pass
    try:
        test_requires.remove(template_info['packagename'])
    except ValueError:
        pass
    # remap deps
    for k, v in package_mapping.items():
        if k in runtime_deps:
            runtime_deps.remove(k)
            runtime_deps.append(v)
        if k in test_requires:
            test_requires.remove(k)
            test_requires.append(v)
    template_info['run_requirements'] = runtime_deps
    template_info['test_requires'] = test_requires
    if 'test_imports' not in template_info:
        template_info['test_imports'] = find_test_imports(importable_lib_name,
                                                          without_tests)
    print('template_info')
    print(template_info)
    # load and render the template
    template_dir = os.path.join(conda_skeletor_content, 'templates')
    print('template directory = %s' % template_dir)
    # create the jinja environment
    jinja_env = Environment(loader=FileSystemLoader(template_dir))
    # note: the original default was the truthy string 'False', which made
    # this branch always run; a real boolean default is used here
    if skeletor_config.get('generate_test_suite', False):
        if 'nose' in test_requires:
            template_info['test_commands'] = ['nosetests -v %s' % importable_lib_name]
        elif 'pytest' in test_requires:
            template_info['test_commands'] = ['py.test -v %s' % importable_lib_name]
    template = jinja_env.get_template('meta.tmpl')
    # template.render(**setup_info)
    meta_fname = os.path.join(output_dir, 'meta.yaml')
    with open(meta_fname, 'w') as f:
        f.write(template.render(**template_info))
    build_bash_fname = os.path.join(output_dir, 'build.sh')
    with open(build_bash_fname, 'w') as f:
        f.write(DEFAULT_BUILD_BASH)
def extract_crf(fin, fout):
    lines = []
    temp = []
    chunks = {}
    linechunks = []
    with open(fin, 'r') as f:
        i = 0
        for line in f.readlines():
            if line.startswith('+++++++++++++'):
                lines.append(temp)
                if not i in chunks.keys():
                    chunks[i] = linechunks
                else:
                    chunks[i].append(linechunks)
                linechunks = []
                temp = []
                i += 1
                continue
            if line.startswith('-------------'):
                continue
            l = []
            line = line.split('\t')
            reverse_line = line[::-1]
            if int(reverse_line[0]) - int(reverse_line[1]) > 1:
                linechunks.append(line)
                continue
            s = line[2]
            print(s)
            s = s.replace(' ', '_')
            print(s)
            l.append(s)
            l.append(line[1])
            temp.append(l)
    for i in range(len(lines)):
        if not i in chunks.keys():
            continue
        line = lines[i]
        for ls in chunks[i]:
            b = int(ls[::-1][1])
            e = int(ls[::-1][0])
            if ls[1].startswith('*verb'):
                suffix = '-VP'
            else:
                suffix = '-NP'
            for j in range(b, e):
                if j == b:
                    if len(line[j]) < 3:
                        line[j].append('B' + suffix)
                    continue
                if j == e - 1:
                    if len(line[j]) < 3:
                        line[j].append('O' + suffix)
                    continue
                if len(line[j]) < 3:
                    line[j].append('I' + suffix)
    for line in lines:
        for l in line:
            if len(l) < 3:
                if l[1].startswith('verb'):
                    l.append('B-VP')
                else:
                    l.append('B-NP')
    finals = []
    for line in lines:
        if NO_PUNCT:
            line = [l for l in line if l[1] != 'punct']
        finals.append(line)
    lines = finals
    with open(fout, 'w') as f:
        for l in lines:
            for line in l:
                f.write(' '.join(line) + '\n')
            f.write('\n')
def config():
    """
    Loads a yaml config file from a posted file or a GET filename.

    :return:
    """
    pano_config_dict = {
        "camera_config_file": str,
        "ptz_config_file": str,
        "HTTP_login": str,
        "field_of_view": str,
        "overlap": float,
        "zoom": int,
        "focus": int,
        "first_corner": str,
        "second_corner": str,
        "use_focus_at_center": bool,
        "scan_order": int,
        "min_free_space": int,
        "pano_loop_interval": int,
        "pano_grid_size": str,
        "pano_main_folder": str,
        "pano_start_hour": int,
        "pano_end_hour": int,
        "pano_start_min": int,
        "pano_wait_min": int,
        "remote_storage_address": str,
        "remote_storage_username": str,
        "remote_storage_password": str,
        "remote_folder": str,
        "local_folder": str,
        "camera_name": str,
        "pano_fallback_folder": str,
        "max_no_pano_images": int
        # duplicate "pano_loop_interval" and "min_free_space" entries from
        # the original dict literal were removed
    }
    cam_config_dict = {
        "ip": str,
        "username": str,
        "password": str,
        "HTTP_login": str,
        "image_size_list": list,
        "horizontal_fov_list": list,
        "vertical_fov_list": list,
        "zoom_list_out": list,
        "zoom_pos": int,
        "zoom_range": list,
        "URL_set_image_size": str,
        "URL_set_zoom": str,
        "URL_set_focus": str,
        "URL_set_focus_mode": str,
        "URL_get_image": str,
        "URL_get_image_size": str,
        "URL_get_zoom": str,
        "URL_get_focus": str,
        "RET_get_image": str,
        "RET_set_image_size": str,
        "RET_set_zoom": str,
        "RET_set_focus": str,
        "RET_get_image_size": str,
        "RET_get_zoom": str,
        "RET_get_focus": str
    }
    ptz_config_dict = {
        "ip": str,
        "username": str,
        "password": str,
        "type": str,
        "pan_range": list,
        "tilt_range": list,
        "pan_tilt_scale": float,
        "URL_set_pan_tilt": str,
        "URL_get_pan_tilt": str,
        "RET_get_pan_tilt": str
    }

    def load_config_file(session_key, filename, yml=None):
        if not yml:
            if not os.path.isfile(filename):
                flash(u'No file.', 'error')
                return None
            try:
                with open(filename, 'r') as f:
                    yml = yaml.load(f.read())
            except Exception as e:
                flash(u"Couldn't read yaml file: {}".format(str(e)))
                return None
        session[session_key + "_fn"] = filename if filename is not None else None
        flashes = []
        if session_key == "camera_config":
            for k, v in yml.items():
                if k not in cam_config_dict.keys():
                    flashes.append(u'{}'.format(k))
                else:
                    session[session_key][k] = v
        elif session_key == "ptz_config":
            for k, v in yml.items():
                if k not in ptz_config_dict.keys():
                    flashes.append(u'{}'.format(k))
                else:
                    session[session_key][k] = v
        elif session_key == "pano_config":
            for k, v in yml.items():
                if k not in pano_config_dict.keys():
                    flashes.append(u'{}'.format(k))
                else:
                    session[session_key][k] = v
        if len(flashes):
            flash(u", ".join(flashes) +
                  " are not valid or aren't configuration options for a {}".format(
                      session_key.replace("_", " ")), "warning")
        else:
            flash(u'Valid config!')

    files = glob("*.yml")
    files.extend(glob("*.yaml"))
    pcfg, ccfg, ptzcfg = sort_validate_configs(files)
    panoform = PanoConfigForm(request.form, prefix="panoform")
    ptzform = PTZConfigForm(request.form, prefix="ptzform")
    camform = CameraConfigForm(request.form, prefix="camform")
    if request.method == "POST":
        for k in request.form.keys():
            if k.split("-")[-1] == 'sel':
                a = {'camera-sel': "camera_config",
                     'ptz-sel': "ptz_config",
                     'pano-sel': "pano_config"}
                load_config_file(a[k], request.form[k])
        if len(request.files):
            if "pano-config-file" in request.files.keys():
                f = request.files['pano-config-file']
                if f and allowed_file(f.filename, ["yml", "yaml"]):
                    load_config_file("pano_config", "",
                                     yml=convert_config(yaml.load(f.read())))
            if "ptz-config-file" in request.files.keys():
                f = request.files['ptz-config-file']
                if f and allowed_file(f.filename, ["yml", "yaml"]):
                    load_config_file("ptz_config", "",
                                     yml=convert_config(yaml.load(f.read())))
            if "cam-config-file" in request.files.keys():
                f = request.files['cam-config-file']
                if f and allowed_file(f.filename, ["yml", "yaml"]):
                    load_config_file("camera_config", "",
                                     yml=convert_config(yaml.load(f.read())))
        if panoform.validate() and panoform.submit.data:
            for k in [x for x in vars(panoform)
                      if not x.startswith("_") and not x == "meta"]:
                session['pano_config'][k] = panoform[k].data
        if ptzform.validate() and ptzform.submit.data:
            for k in [x for x in vars(ptzform)
                      if not x.startswith("_") and not x == "meta"]:
                session['ptz_config'][k] = ptzform[k].data
        if camform.validate() and camform.submit.data:
            for k in [x for x in vars(camform)
                      if not x.startswith("_") and not x == "meta"]:
                session['camera_config'][k] = camform[k].data
    for k, v in session['pano_config'].items():
        # check whether the panorama form has a value that can be set from
        # the session data
        if v is not None:
            try:
                panoform[k].data = v
            except Exception as e:
                print(u'Exception repopulating form: {}'.format(str(e)))
    for k, v in session['ptz_config'].items():
        # check whether the ptz form has a value that can be set from the
        # session data
        if v is not None:
            try:
                ptzform[k].data = v
            except Exception as e:
                print(u'Exception repopulating form: {}'.format(str(e)))
    for k, v in session['camera_config'].items():
        # check whether the camera form has a value that can be set from the
        # session data
        if v is not None:
            try:
                camform[k].data = v
            except Exception as e:
                pass
                # print(u'Exception repopulating form: {}'.format(str(e)))
    template_data = {
        "panoform": panoform,
        "ptzform": ptzform,
        'camform': camform,
        "pano_config_dict": pano_config_dict,
        "cam_config_dict": cam_config_dict,
        "ptz_config_dict": ptz_config_dict,
        "pcfg": pcfg,
        "ccfg": ccfg,
        "ptzcfg": ptzcfg
    }
    return render_template("config-edit.html", **template_data)
def test():
    locations = ['Salem, OR', 'Banks, OR', 'Portland, OR']
    searcher = SearchAgent(KEY, *locations)
    searcher.granularity = 20
    print(searcher.overlap(1, 2, 0))
    # print(searcher.geom_median)
    # trim the first '.'
    errors = [(key[1:], msg) for key, msg in errors]
    return errors, result


if __name__ == '__main__':
    from pprint import pprint as print

    data = {
        "key1": "value1",
        "key2": {
            "key21": "value21"
        }
    }
    should_call_fn = lambda x: not isinstance(x, dict)
    _transform_dict(data, should_call_fn, fn=lambda x: x.upper())
    print(data)
    print('-' * 60)
    print(parse_snippet(("int(0,100)&required&default=10")))
    print(parse_snippet(("int(0,100)&required&default=10", 'desc')))
    print('-' * 60)
    userid = "int&required", "用户账号"  # "user account"
    schema = {
        'userid': userid,
        'userid_list': [userid],
        'inner': {'userid': userid},
        'inner_list': [{'userid': userid}]
    }
    sche = parse(schema)
def calculate_pano_grid():
    """
    Calculates the panorama fov and grid.

    :return:
    """
    messages = []
    try:
        pan0, tilt0 = session['1st_corner']
        pan1, tilt1 = session['2nd_corner']
    except:
        # flash("First Corner or Second Corner not set. Aborting", "error")
        return jsonify(messages=[{
            "message": "First Corner or Second Corner not set. Aborting",
            "classsuffix": "danger"}])
    if not (session.get("image_width", None) and session.get("image_height", None)):
        return jsonify(messages=[{
            "message": "No image width/height, set it first.",
            "classsuffix": "danger"}])
    HFoV = abs(float(pan0) - float(pan1))
    VFoV = abs(float(tilt0) - float(tilt1))
    if VFoV <= HFoV <= 2 * VFoV:
        session['HFoV'] = HFoV
        session['VFoV'] = VFoV
    else:
        return jsonify(messages=[{
            "message": "invalid Fov: h{} v{}".format(HFoV, VFoV),
            "classsuffix": "danger"}])
    lr = float(pan0) <= float(pan1)
    tb = float(tilt0) >= float(tilt1)
    pan_min = float(pan0) if lr else float(pan1)
    pan_max = float(pan0) if not lr else float(pan1)
    max_tilt = float(tilt0) if tb else float(tilt1)
    min_tilt = float(tilt0) if not tb else float(tilt1)
    print(max_tilt)
    print(min_tilt)
    session['top_left_corner'] = [pan_min, max_tilt]
    session['bottom_right_corner'] = [pan_max, min_tilt]
    session['pano_rows'] = int(round((max_tilt - min_tilt) / session['VFoV']
                                     / (1.0 - session.get('overlap', 0))))
    session['pano_cols'] = int(round((pan_max - pan_min) / session['HFoV']
                                     / (1.0 - session.get('overlap', 0))))
    session['pano_total'] = session['pano_rows'] * session['pano_cols']
    # Gigapan Stitcher only works with 2000 images max
    if session['pano_total'] > 2000:
        messages.append({
            "message": 'Total number of images {} is more than {}'.format(
                session['pano_total'], 2000),
            "classsuffix": "warning"})
    # todo: set panogridsize info values.
    if session['pano_rows'] >= 0 and session['pano_cols'] >= 0:
        scale = 2
        # todo: set image size info values here
        image_width, image_height = (session['image_width'],
                                     session['image_height'])
        while scale > 0:
            scaled_height = int(scale * image_height)
            scaled_width = int(scale * image_width)
            if scaled_height * session['pano_rows'] <= 1080 and \
                    scaled_width * session['pano_cols'] <= 1920:
                break
            scale = scale - 0.001
        session["overview_scale"] = scale
        # session['PanoOverViewScale'] = scale
        # session['PanoOverViewHeight'] = scaled_height * session['pano_rows']
        # session['PanoOverViewWidth'] = scaled_width * session['pano_cols']
        # todo: return some updated info about the state of the panorama
        # variables in the session.
        overview_msgs = init_pano_overview()
        messages.extend(overview_msgs)
        # updatePanoOverView()
    else:
        return jsonify(messages=[{
            "message": "Invalid number of panorama rows or columns (you need "
                       "at least 1 of each). Please try again.",
            "classsuffix": "warning"}])
    # todo: return some meaningful json values and represent them in the ui.
    # note: the original tagged this success message "danger" and passed
    # unused format arguments; both look like copy-paste slips
    messages.append({"message": 'successfully calculated the grid',
                     "classsuffix": "success"})
    return jsonify(messages=messages)
This piece of code is unprecedented for the self-defined sequence class and
the compute-when-needed array.
"""
from bisect import bisect_left
# collections.Sequence was removed in Python 3.10; the abc module is the
# supported import path
from collections.abc import Sequence
from pprint import pprint as print

A = [int(x) for x in input().strip().split()]


class FinalEnergyList(Sequence):
    def __init__(self, f, n):
        self.f = f
        self.n = n

    def __len__(self):
        return self.n

    def __getitem__(self, x):
        if not (0 <= x < self.n):
            raise IndexError
        return self.f(x)


def f(x):
    for h in A:
        x = 2*x - h
    return x


ans = bisect_left(FinalEnergyList(f, pow(10, 5)), 0)
print(ans)
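# A self-contained check of the bisect-over-virtual-sequence idea: bisect_left
# only needs __len__ and __getitem__, so the "array" is computed on demand.
# With hills (1, 2) the final energy is f(x) = 4*x - 4, which is monotonic in
# x, and bisect_left returns 1: the smallest starting energy whose final
# energy is non-negative.
from bisect import bisect_left


def final_energy(x, hills=(1, 2)):
    for h in hills:
        x = 2 * x - h
    return x


class Lazy:
    def __len__(self):
        return 10

    def __getitem__(self, i):
        return final_energy(i)


print(bisect_left(Lazy(), 0))  # 1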