def __init__(self, webdriver, base_url, components, **env):
    """Controller for managing components.

    :param webdriver: Webdriver for controller and components to reference.
    :type webdriver: WebDriver
    :param base_url: Base url for navigations, will navigate to this url in init.
    :type base_url: basestring
    :param components: Component objects to instantiate.
    :type components: tuple, list, dict
    :param env: Key value pairs to pass to instantiated components.
    :type env: **kwargs => dict
    :raises TypeError: if components is not a tuple, list, or dict.
    """
    self.webdriver = self.__patch_webdriver(webdriver)
    self.js = E2EJS(webdriver)
    self.base_url = base_url
    self.logger = logger
    if not isinstance(components, (tuple, list, dict)):
        raise TypeError('Components must be either a tuple, list, or dictionary')
    self.env = Resource(**env) if env else Resource()
    if isinstance(components, dict):
        # BUG FIX: the dict-comprehension braces must enclose the whole
        # name -> component mapping; the original applied ** to a generator
        # of one-entry dicts, which is invalid syntax. Also use .items()
        # instead of the Python-2-only .iteritems().
        self.components = Resource(**{
            name: component(webdriver=self.webdriver, logger=self.logger,
                            env=self.env)
            for name, component in components.items()})
    else:
        self.components = [
            component(webdriver=self.webdriver, logger=self.logger,
                      env=self.env)
            for component in components]
    # Navigate to the base url immediately on construction.
    self.webdriver.get(self.base_url)
def initialize(self):
    """Restore the system to its initial state and create Process 0.

    Rebuilds the PCB/RCB tables and the per-priority ready lists, creates
    process 0 as the running process, and registers the fixed resources.

    :return: 0 on success.
    """
    self.PCB = [None] * 16
    self.RCB = [None] * 4
    # Ready list: one FIFO queue per priority level.
    self.ready_list = {2: deque(), 1: deque(), 0: deque()}
    # Create Process 0 and place it into the ready list.
    # Process(0, 0, 1, 0) -- presumably (id, state, priority, parent);
    # TODO confirm argument order against the Process class.
    self.PCB[0] = Process(0, 0, 1, 0)
    # BUG FIX: reset the counter instead of incrementing it, so calling
    # initialize() repeatedly does not accumulate stale process counts
    # (this method is documented as restoring the initial state).
    self.active_processes = 1
    self.ready_list[0].append(self.PCB[0].id)
    # Process 0 is the currently running process.
    self.run_proc = self.PCB[self.ready_list[0][0]]
    # Set up all resources with their fixed unit counts.
    self.RCB[0] = Resource(0, 1)
    self.RCB[1] = Resource(1, 1)
    self.RCB[2] = Resource(2, 2)
    self.RCB[3] = Resource(3, 3)
    return 0
def setUp(self):
    """Create three disposable resources, each with two one-second limits."""
    one_second = timedelta(seconds=1)
    self._resource1 = Resource('test_resource1',
                               [Limit(2, one_second), Limit(3, one_second)])
    self._resource2 = Resource('test_resource2',
                               [Limit(6, one_second), Limit(7, one_second)])
    self._resource3 = Resource('test_resource3',
                               [Limit(10, one_second), Limit(10, one_second)])
def create_resources_table():
    """Return a hard-coded list of sample news Resource objects.

    Each Resource is constructed from (article url, image url, title,
    summary); the texts are Hebrew-language articles from ynet,
    calcalist, nrg and walla used as seed data.
    """
    resources_list = [
        Resource(
            'http://www.ynet.co.il/articles/0,7340,L-4713571,00.html',
            'https://images1.ynet.co.il/PicServer4/2014/08/05/5506384/52203970100690640360no.jpg',
            'החוש הדומיננטי שיעזור לכם בלימודים',
            'החוש הדומיננטי שיעזור לכם בלימודים. אילו טיפים של שימושבחושים יעזרו לכם?'
        ),
        Resource(
            'http://www.ynet.co.il/articles/0,7340,L-5045541,00.html',
            'https://images1.ynet.co.il/PicServer5/2017/11/23/8172884/817287001000100980704no.jpg',
            '"כ"ט בנובמבר: "שמחה שנמשכה ימים ולילות, הייתה אופוריה"',
            'ב1947- הם היו ילדים או צעירים בתחילת דרכם,' +
            ' אבל את היום הגורלי ב29- בנובמבר הם לא שוכחים עד היום.' +
            ' "כולם היו צמודים לרדיו. אני זוכרת את התפרצות השמחה, ריקודים והתחבקויות."'
        ),
        Resource(
            'https://www.calcalist.co.il/world/articles/0,7340,L-3726321,00.html',
            'https://images1.calcalist.co.il/PicServer3/2017/11/30/775736/2_l.jpg',
            'רוצים נייר טואלט? הזדהו: כך משפרים הסינים את מצב השירותים הציבוריים',
            'שבוע קרא נשיא סין שי ג‘ינפינג להמשיך את מהפכת השירותים' +
            ' הציבוריים עליה הכריז ב-2015. עד כה שופצו ונבנו 68 אלף מתקנים'),
        Resource(
            'http://www.nrg.co.il/online/13/ART2/902/962.html',
            'http://www.nrg.co.il/images/archive/465x349/1/646/416.jpg',
            'מחקו לכם הודעה בווטסאפ? עדיין תוכלו לקרוא אותה',
            'אפליקציה בשם Notification History מאפשרת למשתמשי אנדרואיד' +
            ' לקורא את הנתונים הזמניים הנשמרים ביומן הפעילות של הסמארטפון. כולל הודעות מחוקות.'
        ),
        Resource(
            'http://www.nrg.co.il/online/55/ART2/904/542.html',
            'http://www.nrg.co.il/images/archive/465x349/1/795/429.jpg',
            'גם בחורף: זה בדיוק הזמן לקפוץ לאילת',
            'העיר הדרומית נעימה לנופש גם בחודשי החורף.' +
            ' כעת מוצעים מחירים אטרקטיביים במיוחד בחבילות שכוללות מגוון אטרקציות, לינה וטיסות'
        ),
        Resource(
            'https://food.walla.co.il/item/3113079',
            'https://img.wcdn.co.il/f_auto,w_700/2/5/1/3/2513314-46.jpg',
            '12 בתי קפה שמתאימים לעבודה עם לפטופ',
            'בין אם אתם סטודנטים או עצמאיים, זה תמיד סיפור למצוא בית קפה נעים וטעים לרבוץ בו.' +
            ' קיבצנו עבורכם 12 מקומות אהובים בדיוק למטרה זו, בארבע הערים הגדולות'
        ),
        Resource(
            'https://news.walla.co.il/item/3114145',
            'https://img.wcdn.co.il/f_auto,w_700/2/4/9/5/2495334-46.jpg',
            'שותק על אזריה, נלחם באהוד ברק: בנט מנסה להיבנות כימין ממלכתי',
            'כשרגב נלחמת ברעש בתאטרון יפו, בנט משנה בשקט את נהלי סל התרבות כך ' +
            'שהחומרים "השמאלנים" ייפלטו. כשהקשת הפוליטית מתרעמת על דיווחי' +
            ' ה"דיל" של טראמפ עם הפלסטינים, בנט שותק עד שהרשות תסרב.'),
        Resource(
            'https://news.walla.co.il/item/3114283',
            'https://img.wcdn.co.il/f_auto,w_700/2/5/1/4/2514588-46.jpg',
            'רצח בכל שלושה ימים: צרפת יוצאת למאבק באלימות נגד נשים',
            'אחרי ש126- נשים נרצחו בידי בני זוגן בשנה שעברה, ' +
            'הציג מקרון צעדים חדשים למלחמה בתופעה. "זאת בושה לצרפת,"' +
            ' אמר הנשיא שאחת מהבטחות הבחירות שלו הייתה להשיג שוויון מגדרי.')
    ]
    return resources_list
def add_capability_list(self, capability_list=None):
    """Record a capability list in this Source Description.

    If *capability_list* looks like a CapabilityList object (it has a
    ``uri`` attribute), a Resource is built preserving its capability
    name and optional ``describedby`` link; otherwise the value is
    treated as a plain URI string with capability 'capabilitylist'.
    """
    if hasattr(capability_list, 'uri'):
        resource = Resource(uri=capability_list.uri,
                            capability=capability_list.capability_name)
        described_by = capability_list.describedby
        if described_by is not None:
            resource.link_set(rel='describedby', href=described_by)
    else:
        resource = Resource(uri=capability_list,
                            capability='capabilitylist')
    self.add(resource)
def __init__(self, request):
    """Render one chart "box" in the format implied by the request path.

    The chart name comes from the route (``box_name``) and the output
    format from the file extension of the request path (.html, .csv,
    .json).
    """
    resource_provider = ResourceProvider()
    resource = Resource(resource_provider)
    self.charts = []
    self.chart_type = None
    self.layout = Layout(request)
    self.body = ''
    self.javascript = ''
    self.chart_name = request.matchdict['box_name']
    # The path's file extension selects the output format.
    chart_format = os.path.splitext(request.environ['PATH_INFO'])[1]
    # Go through the registry, and find the resource for this box
    # XXX This should be a dictionary
    # NOTE(review): if chart_name is not in the registry, self.resources
    # is never set -- confirm downstream code tolerates that.
    for res in RESOURCES_REGISTRY:
        if res[0] == self.chart_name:
            self.resources = [res]
    if chart_format == '.html':
        self.render_html(request)
    elif chart_format == '.csv':
        self.body = resource.get(self.chart_name, CSV, request.matchdict)
    elif chart_format == '.json':
        self.body = resource.get(self.chart_name, JSON, request.matchdict)
    else:
        # Unsupported format: report (Python 2 print statement) and raise.
        print "Format not supported %s" % chart_format
        raise AttributeError
def import_ds(ds, type=None, parent_id=None):
    """Create a CKAN dataset from *ds* and attach its resources.

    *type* may be 'parent' or 'child'; a child records *parent_id* as its
    parent dataset. Resources with an empty url are skipped and logged.
    Returns the fully updated dataset object.
    """
    if type == 'parent':
        ds['is_parent'] = 'true'
    else:
        ds.pop('is_parent', None)
    if type == 'child':
        ds['parent_dataset'] = parent_id
    else:
        ds.pop('parent_dataset', None)

    # First create a minimal dummy dataset so a name gets assigned...
    stub = create_dummy_dataset()
    stub['title'] = ds['title']
    stub['owner_org'] = owner_org
    ds_created = stub.create(create_url, api_key)

    # ...then push the full metadata onto the created dataset.
    dataset_full = load_dataset(ds_created)
    map_dataset(dataset_full, ds)
    dataset_full._update(update_url, api_key)

    # Attach each distribution entry as a resource.
    for entry in ds.get('distribution', []):
        resource = Resource()
        map_resource(resource, entry, dataset_full['id'])
        if resource['url']:
            resource.create(resource_url, api_key)
        else:
            # Skip and report empty resources rather than failing.
            logging.info(' Empty resource skipped for: %s' % ds['title'])
    return dataset_full
def upload_new_resources(new_resources_df: pd.DataFrame,
                         firestore_resources: Dict[Resource, str], db,
                         sheet: gspread.models.Worksheet) -> List[int]:
    """Upload every row of *new_resources_df* to Firestore.

    Rows not present in *firestore_resources* are added as new documents;
    known rows overwrite their existing document.

    :param new_resources_df: spreadsheet rows describing resources.
    :param firestore_resources: map of existing Resource -> document id.
    :param db: Firestore client.
    :param sheet: source worksheet (unused here; kept for interface parity).
    :return: 1-based indices of the rows successfully uploaded.
    """
    length = len(new_resources_df.index)
    log(f"{length} resources to upload")
    added = 0
    uploaded_rows = list()
    for index, row in new_resources_df.iterrows():
        links = Links(row["card link"], row["website"])
        resource = Resource(row["resource name"], True, row["description"],
                            row["image link"], row["category"],
                            row["tags"].split(", "), links)
        try:
            if resource not in firestore_resources:
                db.collection(FIREBASE_COLLECTION).add(resource.to_dict())
                log(f"\tAdded {row['resource name']} to {FIREBASE_COLLECTION}")
            else:
                db.collection(FIREBASE_COLLECTION).document(
                    firestore_resources[resource]).set(resource.to_dict())
                log(f"\tUpdated {row['resource name']} in {FIREBASE_COLLECTION}"
                    )
        except Exception as e:
            # BUG FIX: the bare `except:` also swallowed KeyboardInterrupt /
            # SystemExit and discarded the actual error; catch Exception
            # and log what went wrong (matching the sibling uploader).
            log(f"Error uploading data to firestore. {added} / {length} resources uploaded successfully"
                )
            log(f"\t{e}")
            return uploaded_rows
        added += 1
        uploaded_rows.append(index + 1)
    log(f"\nAdded {added} / {length} entries to Firestore")
    return uploaded_rows
def handler(is_running, request_q, response_q):
    """Worker loop: dispatch raw HTTP requests to resource handlers.

    Pulls raw requests from *request_q*, routes by the first path segment
    (user/session/tag, falling back to the base Resource), and puts the
    raw response on *response_q*. Runs until is_running.value is falsy.
    """
    resource_map = {
        'user': UserResource,
        'session': SessionResource,
        'tag': TagResource,
    }
    # FIX: `while True and is_running.value` -- the `True and` was a no-op.
    while is_running.value:
        try:
            # read one incoming request
            try:
                request = request_q.get(block=True, timeout=0.001)
            except Exception:
                # FIX: narrowed the bare `except:`. An empty queue
                # (queue.Empty on timeout) is the expected case; poll again.
                continue
            # process incoming request and generate response
            http_request = Request(request['raw'])
            if http_request.path and http_request.path[0] in resource_map:
                resource = resource_map[http_request.path[0]](http_request)
            else:
                resource = Resource(http_request)
            response = resource.response.raw()
            # send back response
            response_q.put({'id': request['id'], 'raw': response})
        except KeyboardInterrupt:
            pass
def add_file(self, resource_list=None, dir=None, file=None):
    """Add a single file to resource_list.

    Honours the object settings set_path, set_md5 and set_length.
    Excluded files, non-files, and (unless include_symlinks) symlinks
    are silently skipped; OS errors are reported to stderr and skipped.
    """
    try:
        if self.exclude_file(file):
            self.logger.debug("Excluding file %s" % (file))
            return
        # Build the absolute filename, then check it is a wanted file.
        if dir is not None:
            file = os.path.join(dir, file)
        unwanted_link = os.path.islink(file) and not self.include_symlinks
        if not os.path.isfile(file) or unwanted_link:
            return
        uri = self.mapper.dst_to_src(file)
        if uri is None:
            raise Exception("Internal error, mapping failed")
        file_stat = os.stat(file)
    except OSError as e:
        sys.stderr.write("Ignoring file %s (error: %s)" % (file, str(e)))
        return
    timestamp = file_stat.st_mtime  # UTC
    r = Resource(uri=uri, timestamp=timestamp)
    if self.set_path:
        # Record the full local path alongside the resource.
        r.path = file
    if self.set_md5:
        r.md5 = compute_md5_for_file(file)
    if self.set_length:
        r.length = file_stat.st_size
    resource_list.add(r)
def run(self):
    """Fetch duplicate URLs of this article from the sameAs webservice,
    group them into per-domain datasources, then update each datasource
    (download content, save it to disk if new or changed) and hand the
    results to self.callback.
    """
    self.done = 0
    directoryBaseURL = self.config['directoryURL']
    dbPediaURL = self.config['dbPediaURL']
    directoryURL = "%s%s%s" % (directoryBaseURL, dbPediaURL, self.article)
    # The service returns JSON whose first element lists the duplicates.
    page = json.load(urllib2.urlopen(directoryURL))
    duplicates = page[0]["duplicates"]
    self.total = len(duplicates)
    # Create resources and group them into datasources by domain.
    for url in duplicates:
        # DEBUG: `True or ...` deliberately disables the freebase/geonames
        # filter; remove the `True or` to re-enable it.
        if True or "freebase" in url or "geonames" in url:
            resource = Resource(url)
            # BUG FIX: the original appended to the loop-carried
            # `datasource` variable, which still pointed at a *previous*
            # URL's datasource whenever this domain already existed --
            # always append to the datasource registered for this domain.
            if resource.domain not in self.datasources:
                self.datasources[resource.domain] = Datasource(
                    resource.domain, self.lastdate)
            self.datasources[resource.domain].resources.append(resource)
    # Update datasources: duplicate detection, creation of json.
    for domain, datasource in self.datasources.iteritems():
        if not self._stop.is_set():  # do not proceed if stop is set
            datasource.update()
            self.done += 1
    self.completed = 1
    self.callback(self.datasources)
def __init__(self, filterchain_name, serialize=None, default_media_name=None):
    """Build a filter chain, either from serialized state or empty.

    :param filterchain_name: name of this chain.
    :param serialize: optional serialized state to restore from.
    :param default_media_name: media to fall back on when started with
        an empty media name.
    """
    # Imported here (not at module level) to break a circular import.
    from resource import Resource

    self.resource = Resource()
    self.filterchain_name = filterchain_name
    self.filters = []
    # Maps filter_name -> list of observers.
    self.image_observers = {}
    self.filter_output_observers = []
    self.original_image_observer = []
    self.dct_global_param = {}
    self.dct_media_param = {}
    self.default_media_name = default_media_name
    if serialize:
        # Restore the chain from its serialized representation.
        self.deserialize(filterchain_name, serialize)
    else:
        # Fresh chain: start with a single empty placeholder filter.
        self.add_filter(Filter(keys.get_empty_filter_name()))
        self.do_configure()
def traverse(dct, parent=None):
    """Recursively build Resource objects from a nested API description.

    Registers every resource in self._resources and wires each node's
    'children' entries onto the created Resource.
    """
    built = {}
    for name, description in dct.items():
        host = url_to_host(self.base_url)
        node = Resource(
            self,
            name,
            description.get('relativePath'),
            urljoin(host, description.get('schema')),
            urljoin(host, description.get('apidoc')),
            parent=parent,
        )
        self._resources[name] = node
        built[name] = node
        children = {}
        for key, value in description.items():
            if key == 'children':
                children.update(traverse(value, parent=node))
        node.children = children
    return built
def test_reponse_with_data_and_status_code(self):
    """A handler returning data plus a 201 status must JSON-encode the data."""
    payload = dict(
        complex_response=dict(something='good', something_else='great'))

    class MyHandler(BaseHandler):
        """Handler whose create() returns a response carrying data and 201."""
        allowed_methods = ('POST', )

        def create(self, request):
            response = rc.CREATED
            response.content = payload
            return response

    resource = Resource(MyHandler)
    request = HttpRequest()
    request.method = 'POST'
    response = resource(request, emitter_format='json')

    self.assertEquals(201, response.status_code)
    self.assertTrue(isinstance(response.content, str),
                    "Expected response content to be a string")
    # The JSON body, decoded, must round-trip back to the original dict.
    self.assertEquals(payload, simplejson.loads(response.content))
def get_resources(self, limit=None, owner=None):
    """Get a list of resources from HydroShare.

    Warning: with no limit this takes a long time to complete.

    :param limit: optional maximum number of resources to fetch.
    :param owner: optional owner filter passed through to the client.
    :return: Resource[]
    """
    resources = list()
    start_time = time.time()
    logging.info(
        "Grabbing {limit} resources from www.hydroshare.org:".format(
            limit=limit if limit else ''))
    for resource_json in self.client.resources(owner=owner):
        # BUG FIX: the original's `elif isinstance(limit, int) and
        # limit < 1: break` was unreachable (the first isinstance branch
        # always matched for ints), so `limit` was never enforced.
        if isinstance(limit, int):
            if limit < 1:
                break
            limit -= 1
        logging.info(json.dumps(resource_json))
        resource = Resource(self.client, raw=resource_json, **resource_json)
        resources.append(resource)
    elapsed_time = time.time() - start_time
    et_string = time.strftime("%H:%M:%S", time.gmtime(elapsed_time))
    logging.info("Total time to get HydroShare resources: {time}".format(
        time=et_string))
    return resources
def __init__(self, request, cells):
    """Collect resources, charts and rendered javascript for *cells*."""
    self.resource = Resource(ResourceProvider())
    self.cells = cells
    self.resources = self.get_resources()
    self.charts = self.get_charts(request)
    self.packages = self.get_packages()
    # Pre-render the javascript for all charts and their packages.
    self.javascript = render_javascript(self.charts, self.packages)
def add_changed_resources(self, resources, change=None):
    """Add items from a ResourceContainer *resources* to this ChangeList.

    When *change* is given, it is recorded on every Resource created.
    """
    for item in resources:
        self.add(Resource(resource=item, change=change))
def __iter__(self):
    """Yield one Resource per non-empty regular file under self.path."""
    for entry in self.path.glob('**/*'):
        if not entry.is_file():
            continue
        full_path = str(entry.resolve())
        # Skip zero-byte files: there is nothing to digest.
        if os.stat(full_path).st_size == 0:
            continue
        yield Resource(full_path, DiskCrawler.compute_digest)
def resource(self, basename):
    """Create and return a resource object from the internal repository.

    Repository values are copied into the object.

    :param basename: key of the resource in the repository.
    :return: Resource, or None when *basename* is unknown.
    """
    # FIX: dict.has_key() is Python-2-only (removed in Python 3);
    # use the `in` operator instead.
    if basename not in self._repository:
        return None
    uri = self.base_uri + Source.RESOURCE_PATH + "/" + basename
    timestamp = self._repository[basename]['timestamp']
    size = self._repository[basename]['size']
    md5 = compute_md5_for_string(self.resource_payload(basename, size))
    return Resource(uri = uri, timestamp = timestamp, size = size, md5 = md5)
def resource(session):
    """Construct the resource instance.

    Args:
        session: HTTP session to use with this resource.

    Returns:
        A resource object rooted at 'root'.
    """
    root_resource = Resource(session, 'root')
    return root_resource
def setUp(self):
    """Wrap a minimal 'GET / HTTP/1.0' request (empty body) in a Resource."""
    raw_request = (
        'GET / HTTP/1.0\r\n'
        'User-Agent: test user agent\r\n'
        'Host: testhost.com\r\n'
        'Accept: test/mimetype\r\n\r\n'
    )
    self.resource = Resource(Request(raw_request))
def upload_new_resources(new_resources_df: pd.DataFrame,
                         firestore_resources: Dict[Resource, str], db,
                         sheet: gspread.models.Worksheet) -> List[int]:
    """Upload each row of *new_resources_df* to Firestore, grouped by category.

    New resources are appended to their category document's resource/tag
    lists and added to its 'resources' subcollection; resources already in
    *firestore_resources* overwrite their existing document (keeping the
    original dateCreated). Returns the 1-based row numbers uploaded.
    """
    length = len(new_resources_df.index)
    log(f"{length} resources to upload")
    added = 0
    uploaded_rows = list()
    for index, row in new_resources_df.iterrows():
        links = Links(row["card link"], row["website"])
        date_created = datetime.now().strftime("%Y/%m/%d %H:%M:%S")
        resource = Resource(title=row["resource name"],
                            reviewed=True,
                            want_support_with=row["want support with"],
                            this_resource_offers=row["this resource offers"],
                            description=row["description"],
                            img=row["image link"],
                            category=row["category"],
                            tags=row["tags"].split(", "),
                            links=links,
                            date_created=date_created,
                            ranking=row["ranking"])
        try:
            # Category document ids replace '/ ' with '_' in the category name.
            category_document = db.collection(FIREBASE_COLLECTION).document(
                resource.category.replace("/ ", "_"))
            if resource not in firestore_resources:
                category_document.update({
                    "resource_list": firestore.ArrayUnion([resource.title])
                })  # Update resource list
                category_document.update(
                    {"tag_list": firestore.ArrayUnion(resource.tags)})  # Update tag list
                category_document.collection("resources").add(
                    resource.to_dict())  # Add new document to collection
                log(f"\tAdded {row['resource name']} to {FIREBASE_COLLECTION}/{category_document.id}"
                    )
            else:
                # NOTE(review): writes private attribute _date_created so
                # to_dict() keeps the stored dateCreated when overwriting
                # -- confirm against the Resource class.
                resource._date_created = category_document.collection(
                    "resources").document(firestore_resources[resource]).get(
                    ).to_dict()["dateCreated"]
                category_document.collection("resources").document(
                    firestore_resources[resource]).set(resource.to_dict(
                    ))  # Update old document in collection
                log(f"\tUpdated {row['resource name']} in {FIREBASE_COLLECTION}/{category_document.id}"
                    )
        except Exception as e:
            # On any failure, report progress so far and stop early.
            log(f"Error uploading data to firestore. {added} / {length} resources uploaded successfully"
                )
            print(e)
            return uploaded_rows
        added += 1
        uploaded_rows.append(index + 1)
    log(f"\nAdded {added} / {length} entries to Firestore")
    return uploaded_rows
def from_disk(self, path, url_prefix, inventory=None):
    """Create or extend an inventory with resources from a disk scan.

    Assumes a very simple disk path to URL mapping: chop *path* and
    replace it with *url_prefix*. Returns the new or extended Inventory
    object. If *inventory* is specified then items are added to it
    rather than creating a new one.

    mb = InventoryBuilder()
    m = inventory_from_disk('/path/to/files','http://example.org/path')
    """
    num = 0  # NOTE(review): counter is never incremented or used
    # Either use inventory passed in or make a new one
    if (inventory is None):
        inventory = Inventory()
    # for each file: create Resource object, add, increment counter
    for dirpath, dirs, files in os.walk(path, topdown=True):
        for file_in_dirpath in files:
            try:
                if self.exclude_file(file_in_dirpath):
                    continue
                # get abs filename and also URL
                file = os.path.join(dirpath, file_in_dirpath)
                # Skip non-files, and symlinks unless include_symlinks is set.
                if (not os.path.isfile(file) or not (self.include_symlinks or not os.path.islink(file))):
                    continue
                rel_path = os.path.relpath(file, start=path)
                if (os.sep != '/'):
                    # if directory path sep isn't / then translate for URI
                    rel_path = rel_path.replace(os.sep, '/')
                url = url_prefix + '/' + rel_path
                file_stat = os.stat(file)
            except OSError as e:
                # Best effort: report unreadable files and keep scanning.
                sys.stderr.write("Ignoring file %s (error: %s)" % (file, str(e)))
                continue
            mtime = file_stat.st_mtime
            # NOTE(review): fromtimestamp() yields local time, not UTC --
            # confirm whether lastmod should use utcfromtimestamp.
            lastmod = datetime.fromtimestamp(mtime).isoformat()
            r = Resource(uri=url, lastmod=lastmod)
            if (self.do_md5):
                # add md5
                r.md5 = compute_md5_for_file(file)
            if (self.do_size):
                # add size
                r.size = file_stat.st_size
            inventory.add(r)
        # prune list of dirs based on self.exclude_dirs
        for exclude in self.exclude_dirs:
            if exclude in dirs:
                dirs.remove(exclude)
    return (inventory)
def test_become_ready(self):
    """The on_ready callback must fire within a second of construction,
    but not synchronously during it."""
    ready = Event()

    def on_ready(value):
        # Only calls a method; no rebinding, so no nonlocal needed.
        ready.set()

    res = Resource(on_ready)
    self.assertFalse(ready.is_set(), 'resource should not be ready')
    self.assertTrue(ready.wait(1), 'resource should be ready')
def get_resources(subdirectory_paths):
    """Scan each subdirectory and return the set of Resources found there.

    Several files sharing one resource name within a directory (e.g.
    different extensions) collapse to a single Resource.
    """
    resources = set()
    for directory in subdirectory_paths:
        file_names = get_file_names(directory, Resource.proper_extensions)
        names = {get_resource_name(file_name) for file_name in file_names}
        resources.update(Resource(directory, name) for name in names)
    return resources
def document(handler_lua=None, handler_lua_template=None):
    """Create a Document, optionally attaching a Lua handler resource.

    *handler_lua_template* names a canned handler in deje.handlers.lua and,
    when given, supplies the handler code; otherwise *handler_lua* is used
    directly. With neither, a bare Document is returned.
    """
    from document import Document
    from resource import Resource
    doc = Document()
    if handler_lua_template:
        import deje.handlers.lua as handlers
        handler_lua = getattr(handlers, handler_lua_template)()
    if handler_lua:
        doc.add_resource(Resource('/handler.lua', handler_lua,
                                  'The primary handler', 'text/lua'))
    return doc
def __init__(self, t_num, r_num):
    """Create *t_num* tasks and *r_num* resources, keyed by their ids."""
    self.t_dict = {}
    self.r_dict = {}
    self.d_list = []  # dispatch list
    for _ in xrange(t_num):
        task = Task()
        self.t_dict[task.t_id] = task
    for _ in xrange(r_num):
        res = Resource()
        self.r_dict[res.r_id] = res
    # Set of tasks not yet completed (shallow copy of the task dict).
    self.t_dict_unsolved = copy.copy(self.t_dict)
def load_resource(self):
    """Load resource objects from the storage CSV file.

    Rows whose first column is 'R' describe resources; columns 1 and 2
    are passed to the Resource constructor.

    :return: resource object list.
    """
    data_resource = []
    # FIX: use a context manager so the file is closed even if parsing
    # raises, instead of relying on an explicit close().
    with open(self.storage) as data_file:
        for row in csv.reader(data_file):
            # `row and` guards against blank lines, which csv.reader
            # yields as empty lists (row[0] would raise IndexError).
            if row and row[0] == 'R':
                data_resource.append(Resource(row[1], row[2]))
    return data_resource
def __init__(self, jobs, dockers, num_resources=5, cores_per_resource=2):
    """Critical-path-first scheduler over a fixed pool of resources.

    :param jobs: jobs to schedule (topologically sorted during init).
    :param dockers: docker descriptions used when executing tasks.
    :param num_resources: size of the resource pool (previously a
        hard-coded 5; defaulted for backward compatibility).
    :param cores_per_resource: cores per resource (previously a
        hard-coded 2; defaulted for backward compatibility).
    """
    super(CriticalPathFirst, self).__init__()
    self.jobs = jobs
    self.dockers = dockers
    # order matters? -- jobs must be topologically ordered before the
    # critical path can be computed.
    self._topo_sort()
    self.find_critical_path()
    # Initialise schedule: a pool of identical resources.
    self.resources = [
        Resource(resource_id=i, num_cores=cores_per_resource)
        for i in xrange(num_resources)
    ]  # list of Resource objects
    # Resource (and core) that will be used to execute each task.
    self.assigned_resource_core = defaultdict(list)
    self.schedule_all = defaultdict(dict)
def resource(self, basename):
    """Create and return a resource object from the internal repository.

    :param basename: key of the resource in the repository.
    :return: Resource, or None when *basename* is unknown.
    """
    # FIX: dict.has_key() is Python-2-only (removed in Python 3);
    # use the `in` operator instead.
    if basename not in self._repository:
        return None
    host = self.hostname
    port = str(self.port)
    path = Source.RESOURCE_PATH
    uri = "http://" + host + ":" + port + path + "/" + basename
    timestamp = self._repository[basename]['timestamp']
    size = self._repository[basename]['size']
    md5 = compute_md5_for_string(self.resource_payload(basename, size))
    return Resource(uri = uri, timestamp = timestamp, size = size, md5 = md5)