Example no. 1
    def __init__(self):
        self.location = Location(50, 440)
        self.size = Location(40, 40)

        self.sprite = pygame.sprite.Sprite()
        self.sprite.image = pygame.image.load("player.png").convert()
        self.sprite.rect = self.sprite.image.get_rect()
Example no. 2
    def __init__(self):
        self.location = Location(0, 0)
        self.reset()  # Randomly pick a starting location

        self.size = Location(40, 40)
        self.sprite = pygame.sprite.Sprite()
        self.sprite.image = pygame.image.load("asteroid.png").convert()
        self.sprite.rect = self.sprite.image.get_rect()
Example no. 3
    def __init__(self):
        self.location = Location(50, 440)
        self.size = Location(40, 40)
        self.sprite = pygame.sprite.Sprite()
        self.sprite.image = pygame.image.load("images/witch.png").convert()
        self.sprite.image = pygame.transform.scale(self.sprite.image, (50, 50))
        self.sprite.rect = self.sprite.image.get_rect()
        self.travelling_left = True
Example no. 4
    def location(self):
        """
        """

        if not hasattr(self, '_location'):
            self._location = None
            ll = self.request.cookies.get('ll', None)
            if ll is not None:
                parts = ll.split('%2C')
                location = Location(parts[0], parts[1])
                location.validate()
                self._location = location

        return self._location
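Note: the 'll' cookie evidently stores a URL-encoded "lat,lon" pair, hence the split on '%2C'. A minimal, self-contained sketch of the same parsing using urllib.parse.unquote (parse_ll_cookie is a hypothetical name for illustration):

from urllib.parse import unquote

def parse_ll_cookie(raw):
    # "48.85%2C2.35" -> (48.85, 2.35); unquote turns %2C back into a comma
    lat, lon = unquote(raw).split(',')
    return float(lat), float(lon)

assert parse_ll_cookie("48.85%2C2.35") == (48.85, 2.35)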
Example no. 6
class Player(object):
    def __init__(self):
        self.location = Location(50, 440)
        self.size = Location(40, 40)

        self.sprite = pygame.sprite.Sprite()
        self.sprite.image = pygame.image.load("player.png").convert()
        self.sprite.rect = self.sprite.image.get_rect()

    def update(self):
        if pygame.key.get_pressed()[pygame.K_LEFT] != 0:
            # We need to move to the LEFT
            self.location.x -= 5

        if pygame.key.get_pressed()[pygame.K_RIGHT] != 0:
            # We need to move to the RIGHT
            self.location.x += 5

        # Make sure we can't leave the screen
        if self.location.x > 560:
            self.location.x = 560

        if self.location.x < 0:
            self.location.x = 0

    def draw(self, screen):
        screen.blit(self.sprite.image, self.location.get_loc())

    def reset(self):
        self.destroy()

    def destroy(self):
        # Reset back to our starting location
        self.location.x = 50
        self.location.y = 440
Example no. 7
class Asteroid(object):
    def __init__(self):
        self.location = Location(0, 0)
        self.reset()  # Randomly pick a starting location

        self.size = Location(40, 40)
        self.sprite = pygame.sprite.Sprite()
        self.sprite.image = pygame.image.load("asteroid.png").convert()
        self.sprite.rect = self.sprite.image.get_rect()

    def update(self):
        self.location.y += 10
        if self.location.y > 500:
            self.reset()

    def reset(self):
        # We have reached the bottom of the screen so reset
        self.location.y = 0
        self.location.x = random.randint(0, 560)

    def draw(self, screen):
        screen.blit(self.sprite.image, self.location.get_loc())

    def check_collision(self, obj):
        # Check if the two objects are touching
        diffx = self.location.x - obj.location.x
        diffy = obj.location.y - self.location.y
        if diffx < self.size.x and diffx > (self.size.x * -1):
            if diffy < self.size.y and diffy > (self.size.y * -1):
                return True
        return False
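Note: the nested range checks in check_collision amount to an axis-aligned bounding-box overlap test; the sign of each difference is irrelevant because both bounds are symmetric. An equivalent sketch (overlaps is a hypothetical helper):

def overlaps(a_loc, b_loc, size):
    # The objects touch when the offsets are smaller than the box size on both axes
    return abs(a_loc.x - b_loc.x) < size.x and abs(a_loc.y - b_loc.y) < size.y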
Example no. 10
    def load_from_linestring(cls, linestring: str) -> "Road":
        linestring = str_between(linestring, '(', ')')
        linestring_split = linestring.split(', ')
        locations = []
        for item in linestring_split:
            lat, lon = item.split()
            locations.append(Location(float(lat), float(lon)))
        return Road(locations)
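Note: WKT LINESTRINGs conventionally order each pair as "lon lat", so the lat, lon unpacking above assumes the strings were produced in the reversed order. A self-contained sketch of the same parsing without the str_between helper (parse_linestring is a hypothetical name):

def parse_linestring(ls: str) -> list:
    inner = ls[ls.index('(') + 1:ls.rindex(')')]
    return [tuple(map(float, pair.split())) for pair in inner.split(', ')]

print(parse_linestring("LINESTRING (30.0 10.0, 10.0 30.0)"))
# [(30.0, 10.0), (10.0, 30.0)]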
Example no. 11
    def need_to_scan(self, lawnmower):
        for diff in [(0, 1), (1, 1), (1, 0), (1, -1), (0, -1), (-1, -1),
                     (-1, 0), (-1, 1)]:
            if Location(diff[0] + lawnmower.current_location.x,
                        diff[1] + lawnmower.current_location.y) \
                    not in self.knowledge_gained:
                return True

        return False
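Note: the eight hand-written offsets are every (dx, dy) pair except (0, 0); they can be generated instead of listed (the ordering differs, which doesn't matter for a membership scan):

from itertools import product

# All 8-neighbour offsets around a cell, skipping the cell itself
NEIGHBOUR_OFFSETS = [d for d in product((-1, 0, 1), repeat=2) if d != (0, 0)]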
Example no. 12
    def update_mowed_grass_locations(self, mowed_locations):
        '''mowed_locations is a list of Locations; in a single turn the mower
        can cut 0, 1 or 2 squares of grass.'''
        for loc in mowed_locations:
            tmp_loc = Location(loc.x, loc.y)
            self.mowed_grass_locations.append(tmp_loc)
            self.update_mowed_grass_count(1)
            if loc in self.non_mowed_grass_locations:
                self.non_mowed_grass_locations.remove(tmp_loc)
Example no. 13
    def make_scan_data_available(self, lawn_mdata):
        """This method makes the surrounding scan info available to all the puppies."""
        for puppy in self.puppy_list:
            scanned_loc = Location.scanned_location_list(puppy.location)
            scanned_content = list()
            for loc in scanned_loc:
                if lawn_mdata.get(loc) is None:
                    scanned_content.append(const.FENCE)
                else:
                    scanned_content.append(lawn_mdata.get(loc))
            puppy.scanned_data = scanned_content
Example no. 14
    def compute_grass_locations(width, height, obs_loc):
        # Build the full grid, then keep only the cells not occupied by an obstacle
        all_loc = []
        for x in range(width):
            for y in range(height):
                all_loc.append(Location(x, y))
        return [loc for loc in all_loc if loc not in obs_loc]
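Note: if Location is hashable (i.e. defines __hash__ consistently with __eq__, an assumption here), the same computation is a set difference:

def compute_grass_locations_set(width, height, obs_loc):
    all_loc = {Location(x, y) for x in range(width) for y in range(height)}
    return list(all_loc - set(obs_loc))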
Example no. 15
    def scrape_jobs(self):

        try:
            jobs = self.browser.execute_script(
                """return (
                function(){ 
                 var jobs = []; 
                 var els = document.getElementById('experience-section').getElementsByTagName('ul')[0].getElementsByTagName('li');
                 for (var i=0;i<els.length; i++){
                    if(els[i].className!='pv-entity__position-group-role-item-fading-timeline'){   
                    if(els[i].getElementsByClassName('pv-entity__position-group-role-item-fading-timeline').length>0){ 
                     } 
                    else { 
                        try {position = els[i].getElementsByClassName('pv-entity__summary-info')[0].getElementsByTagName('h3')[0].innerText;} 
                        catch(err) { position = ''; } 
                        try { company_name = els[i].getElementsByClassName('pv-entity__summary-info')[0].getElementsByClassName('pv-entity__secondary-title')[0].innerText;}
                        catch (err) { company_name = ''; }
                        try{date_ranges = els[i].getElementsByClassName('pv-entity__summary-info')[0].getElementsByClassName('pv-entity__date-range')[0].getElementsByTagName('span')[1].innerText;}
                        catch (err) {date_ranges = ''; }
                        try {exp=els[i].getElementsByClassName('pv-entity__summary-info')[0].getElementsByTagName('h4')[1].getElementsByTagName('span')[1].innerText;}
                        catch(err) {exp='';}        
                        try{job_location = els[i].getElementsByClassName('pv-entity__summary-info')[0].getElementsByClassName('pv-entity__location')[0].getElementsByTagName('span')[1].innerText;}
                        catch (err) {job_location = ''; }
                        try{company_url =els[i].getElementsByTagName('a')[0].href;} 
                        catch (err) {company_url = ''; }
                        jobs.push([position, company_name, company_url, date_ranges, exp, job_location]);}}}
                        return jobs; })();""")
        except WebDriverException:
            jobs = []

        parsed_jobs = []

        for job in jobs:
            try:
                company_industry, company_employees = self.scrape_company_details(job[2])

                parsed_jobs.append(
                    Job(
                        position=job[0],
                        company=Company(
                            name=job[1],
                            industry=company_industry,
                            employees=company_employees,
                        ),
                        location=Location(job[5]),
                        exp=job[4],
                        date_range=job[3]
                    )
                )
            except Exception:
                pass
        return parsed_jobs
Example no. 16
    def make_scan_data_available(self, lawn_mdata):
        """This method makes the surrounding scan info available to all the lawnmowers."""
        for lawnmower in self.lawnmowers:
            curr_loc = lawnmower.get_lawnmower_current_location()
            scanned_loc = Location.scanned_location_list(curr_loc)
            scanned_content = list()
            for loc in scanned_loc:
                if lawn_mdata.get(loc) is None:
                    scanned_content.append(const.FENCE)
                else:
                    scanned_content.append(lawn_mdata.get(loc))
            lawnmower.scanned_data = scanned_content
            lawnmower.loc_content = lawn_mdata.get(lawnmower.current_location)
Example no. 17
def proxy(domain, port, fn):
    conf = Server(
        Location(
            "/",
            include="proxy_params",
            proxy_pass=f"http://127.0.0.1:{port}",
        ),
        listen=NGINX_PORT,
        server_name=domain,
    )

    with open(f"/etc/nginx/sites-enabled/{fn}", "w") as f:
        f.write(str(conf))
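Note: a hypothetical call, assuming the Server/Location builders above render a complete nginx server block via str():

# Proxy myapp.example.com to a local service on port 8000, writing the
# config to /etc/nginx/sites-enabled/myapp
proxy("myapp.example.com", 8000, "myapp")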
Example no. 18
class Player(object):
    def __init__(self):
        self.location = Location(50, 440)
        self.size = Location(40, 40)
        self.sprite = pygame.sprite.Sprite()
        self.sprite.image = pygame.image.load("images/witch.png").convert()
        self.sprite.image = pygame.transform.scale(self.sprite.image, (50, 50))
        self.sprite.rect = self.sprite.image.get_rect()
        self.travelling_left = True

    def update(self):
        if pygame.key.get_pressed()[pygame.K_LEFT] != 0:
            self.location.x -= 5
            self.travelling_left = True

        if pygame.key.get_pressed()[pygame.K_RIGHT] != 0:
            self.location.x += 5
            self.travelling_left = False

        # Make sure we can't leave the screen
        if self.location.x > 560:
            self.location.x = 560

        if self.location.x < 0:
            self.location.x = 0

    def draw(self, screen):
        if self.travelling_left:
            screen.blit(self.sprite.image, self.location.get_loc())
        else:
            img = pygame.transform.flip(self.sprite.image, True, False)
            screen.blit(img, self.location.get_loc())

    def destroy(self):
        # Reset back to our starting location
        self.location.x = 50
        self.location.y = 440
Example no. 19
    def __init__(self, obs):
        super().__init__()
        self.obs = obs
        np.set_printoptions(threshold=np.inf)  # never summarize printed arrays

        bases = locate_deposits(
            obs).tolist()  # possible bases (minimap coords)

        tree = spatial.KDTree(bases)
        starting_base_index = tree.query(
            [get_mean_player_position(obs, 'feature_minimap')])[1][0]

        self.bases = [Base(Location(loc, None), BASE_EMPTY) for loc in bases]
        self.bases[starting_base_index].ownership = BASE_SELF
        self.starting_base = self.bases[starting_base_index]
        self.starting_base.init_production(build_orders.TERRAN_MARINE_RUSH)

        # the starting base already has a command center / add it to the base's producers
        command_y, command_x = (obs.observation['feature_screen'][_UNIT_TYPE]
                                == _COMMAND_CENTER).nonzero()
        command_y, command_x = np.median(command_y), np.median(command_x)
        self.starting_base.buildings[_COMMAND_CENTER].append(
            Location(self.starting_base.location.minimap,
                     (command_x, command_y)))
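Note: KDTree.query returns a (distances, indices) pair, which is why the code above takes [1][0] to get the index of the base closest to the mean player position. A minimal illustration:

from scipy import spatial

tree = spatial.KDTree([[0, 0], [10, 10], [20, 5]])
dist, idx = tree.query([[9, 9]])
print(idx[0])  # -> 1, the point [10, 10]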
Example no. 20
    def __init__(self):
        self.location = Location(100, 0)
        self.size = Location(40, 40)
        self.good_type = None
        self.cur_image = 0
        self.images = []

        # Load each sprite image and scale it to its display size
        for filename, scale in [
                ("images/eyeball.png", (50, 50)),
                ("images/bat.png", (50, 50)),
                ("images/small_snake.png", (32, 64)),
                ("images/jaws.png", (50, 50)),
                ("images/colorful-1321413.png", (51, 37)),
                ("images/dog.png", (40, 50)),
                ("images/small_flower.png", (54, 50))]:
            sprite = pygame.sprite.Sprite()
            sprite.image = pygame.image.load(filename).convert()
            sprite.image = pygame.transform.scale(sprite.image, scale)
            sprite.rect = sprite.image.get_rect()
            self.images.append(sprite)

        self.get_rand_image()
Example no. 21
    def move(self):
        scan_list = self.scanned_data
        loc_list = Location.scanned_location_list(self.location)
        # Collect the neighbouring squares the puppy may legally move onto
        possible_move = list()
        for i in range(8):
            if scan_list[i] == const.GRASS or scan_list[i] == const.EMPTY:
                possible_move.append(loc_list[i])

        if possible_move:
            loc_id = random.randint(0, len(possible_move) - 1)
            self.set_obstacle_location(possible_move[loc_id])
            print("Puppy: moved to ", possible_move[loc_id])
            return possible_move[loc_id]

        return [0, 0]
Example no. 22
def filter_by_location(data, place):
    LAT_LON_BOUNDS = {
        'western_europe':
        [Location(35.606383, -11.094409),
         Location(58.715069, 15.972913)],
        'us': [Location(30, -126), Location(49, -67)],
        'ca': [Location(32.18, -124.49),
               Location(41.94, -114.77)],
    }
    result = []
    bounds = LAT_LON_BOUNDS[place]
    for entry in data:
        lat = float(entry['latitude'])
        lon = float(entry['longitude'])
        if lat > bounds[0].lat and lat < bounds[1].lat \
                and lon > bounds[0].lon and lon < bounds[1].lon:
            result.append(entry)
    return result
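Note: hypothetical usage, assuming Location(lat, lon) exposes .lat/.lon as the bounds table requires:

data = [
    {'latitude': '37.77', 'longitude': '-122.42'},  # San Francisco: inside 'ca'
    {'latitude': '48.85', 'longitude': '2.35'},     # Paris: outside 'ca'
]
print(filter_by_location(data, 'ca'))  # keeps only the San Francisco entry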
Example no. 23
def start():
    username = request.form.get("username")

    try:
        sh("id", "-u", username)
        user_exists = True
    except subprocess.CalledProcessError:
        user_exists = False

    if is_prod_build():
        domain = f"{username}.{get_host()}"
    else:
        domain = f"{username}-{get_host()}"

    if not user_exists:
        print(f"User {username} doesn't exist, creating...", file=sys.stderr)
        sh("useradd", "-b", "/save", "-m", username, "-s", "/bin/bash")
        print(
            f"Proxying {domain} to {get_hosted_app_name()}...",
            file=sys.stderr,
        )
        add_domain(
            name=get_hosted_app_name(),
            domain=domain,
            proxy_set_header={
                "Host": "$host",
                "Upgrade": "$http_upgrade",
                "Connection": "upgrade",
                "Accept-Encoding": "gzip",
            },
        )

    sh("chown", "-R", username, f"/save/{username}")
    print("Home folder owner set.", file=sys.stderr)

    sh("mkdir", "-p", "/save/.cache")
    sh("chmod", "a+rw", "/save/.cache")

    if not get_server_pid(username):
        print(f"Server for {username} is not running, starting...",
              file=sys.stderr)
        with db_lock("ide", username):
            passwd = gen_salt(24)
            port = get_open_port()

            config = {
                "bind-addr": f"127.0.0.1:{port}",
                "auth": "password",
                "password": passwd,
            }

            with open(f"/save/{username}/.code-server.yaml", "w") as csc:
                yaml.dump(config, csc)

            sh("chown", "-R", username, f"/save/{username}/.code-server.yaml")
            print("Configuration ready.", file=sys.stderr)

            sanitized = os.environ.copy()
            del sanitized["DATABASE_URL"]
            del sanitized["APP_HOME"]
            del sanitized["APP_MASTER_SECRET"]
            del sanitized["ENV"]
            del sanitized["INSTANCE_CONNECTION_NAME"]
            sanitized["PORT"] = str(port)

            print("Environment sanitized.", file=sys.stderr)

            subprocess.Popen(get_server_cmd(username), env=sanitized)
            print("Subprocess opened.", file=sys.stderr)

            conf = Server(
                Location(
                    "/",
                    include="proxy_params",
                    proxy_pass=f"http://127.0.0.1:{port}",
                    proxy_set_header={
                        "Host": "$host",
                        "Upgrade": "$http_upgrade",
                        "Connection": "upgrade",
                        "Accept-Encoding": "gzip",
                    },
                ),
                server_name=domain,
                listen=NGINX_PORT,
                error_page=f"502 https://{get_host()}",
            )

            with open(f"/etc/nginx/sites-enabled/{domain}", "w") as f:
                f.write(str(conf))
            sh("nginx", "-s", "reload")
            print("NGINX configuration written and server restarted.",
                  file=sys.stderr)

    if not os.path.exists(f"/save/{username}/berkeley-cs61a"):
        print(f"Copy of repo for {username} not found.", file=sys.stderr)
        if os.path.exists("/save/root/berkeley-cs61a"):
            print("Found a known good repo, copying...", file=sys.stderr)
            shutil.copytree(
                "/save/root/berkeley-cs61a",
                f"/save/{username}/berkeley-cs61a",
                symlinks=True,
            )
            print(
                "Tree copied. Writing Visual Studio Code associations...",
                file=sys.stderr,
            )
            os.mkdir(f"/save/{username}/berkeley-cs61a/.vscode")
            with open(f"/save/{username}/berkeley-cs61a/.vscode/settings.json",
                      "w") as f:
                f.write(VSCODE_ASSOC)
            print("Done.", file=sys.stderr)
            sh("chown", "-R", username, f"/save/{username}/berkeley-cs61a")
            print("Tree owner changed.", file=sys.stderr)

    print("Waiting for code-server to come alive, if needed...",
          file=sys.stderr)
    while requests.get(f"https://{domain}").status_code != 200:
        time.sleep(1)
    print("code-server is alive.", file=sys.stderr)

    print("IDE ready.", file=sys.stderr)
    return redirect(session.pop(SK_RETURN_TO, url_for("index")))
Example no. 24
    def scrap_profile(self, profile_linkedin_url,
                      profile_known_graduation_date):

        if not is_url_valid(profile_linkedin_url):
            return ScrapingResult('BadFormattedLink')

        # Scraping of the profile may fail due to human check forced by LinkedIn
        try:

            # Setting of the delay (seconds) between operations that need to be sure loading of page is ended
            loading_pause_time = 2
            loading_scroll_time = 1

            # Opening of the profile page
            self.browser.get(profile_linkedin_url)

            if str(self.browser.current_url).strip() != profile_linkedin_url.strip():
                if self.browser.current_url == 'https://www.linkedin.com/in/unavailable/':
                    return ScrapingResult('ProfileUnavailable')
                else:
                    raise HumanCheckException

            # Default contact fields in case any 'Contact info' scrape fails
            email = phone = birthday = connectedDate = ''

            # Scraping the Email Address from Contact Info (email)

            # > click on 'Contact info' link on the page
            self.browser.execute_script(
                "(function(){try{for(i in document.getElementsByTagName('a')){let el = document.getElementsByTagName('a')[i]; "
                "if(el.innerHTML.includes('Contact info')){el.click();}}}catch(e){}})()"
            )
            time.sleep(loading_pause_time)

            # > gets email from the 'Contact info' popup
            try:
                email = self.browser.execute_script(
                    "return (function(){try{for (i in document.getElementsByClassName('pv-contact-info__contact-type')){ let "
                    "el = "
                    "document.getElementsByClassName('pv-contact-info__contact-type')[i]; if(el.className.includes("
                    "'ci-email')){ "
                    "return el.children[2].children[0].innerText; } }} catch(e){return '';}})()"
                )
            except:
                pass

            # Scraping the Phone from Contact Info (phone)
            try:
                phone = self.browser.execute_script(
                    "return (function(){try{for (i in document.getElementsByClassName('pv-contact-info__contact-type')){ let "
                    "el = "
                    "document.getElementsByClassName('pv-contact-info__contact-type')[i]; if(el.className.includes("
                    "'ci-phone')){ "
                    "return el.children[2].children[0].innerText; } }} catch(e){return '';}})()"
                )
            except:
                pass

            # Scraping the Birthday from Contact Info (birthday)
            try:
                birthday = self.browser.execute_script(
                    "return (function(){try{for (i in document.getElementsByClassName('pv-contact-info__contact-type')){ let "
                    "el = "
                    "document.getElementsByClassName('pv-contact-info__contact-type')[i]; if(el.className.includes("
                    "'ci-birthday')){ "
                    "return el.children[2].children[0].innerText; } }} catch(e){return '';}})()"
                )
            except:
                pass

            # Scraping the Date Connected from Contact Info (connected date)
            try:
                connectedDate = self.browser.execute_script(
                    "return (function(){try{for (i in document.getElementsByClassName('pv-contact-info__contact-type')){ let "
                    "el = "
                    "document.getElementsByClassName('pv-contact-info__contact-type')[i]; if(el.className.includes("
                    "'ci-connected')){ "
                    "return el.children[2].children[0].innerText; } }} catch(e){return '';}})()"
                )

                self.browser.execute_script(
                    "document.getElementsByClassName('artdeco-modal__dismiss')[0].click()"
                )
            except:
                pass

            # Loading the entire page (LinkedIn loads content asynchronously based on your scrolling)
            window_height = self.browser.execute_script(
                "return window.innerHeight")
            scrolls = 1
            while scrolls * window_height < self.browser.execute_script(
                    "return document.body.offsetHeight"):
                self.browser.execute_script(
                    f"window.scrollTo(0, {window_height * scrolls});")
                time.sleep(loading_scroll_time)
                scrolls += 1

            try:
                self.browser.execute_script(
                    "document.getElementsByClassName('pv-profile-section__see-more-inline')[0].click()"
                )
                time.sleep(loading_pause_time)
            except:
                pass

            # Get all the job positions
            try:
                job_positions = self.browser.find_element_by_id(
                    'experience-section').find_elements_by_tag_name('li')
            except:
                job_positions = []

            # Get all the educations
            try:
                educations = self.browser.find_element_by_id(
                    'education-section').find_elements_by_tag_name('li')
            except:
                educations = []

            # Parsing of the page html structure
            soup = BeautifulSoup(self.browser.page_source, 'lxml')

            # Scraping the Name (using soup)
            try:
                name_div = soup.find('div', {'class': 'flex-1 mr5'})
                name_loc = name_div.find_all('ul')
                headline = name_div.find_all('h2')
                headline = headline[0].get_text().strip()
                profile_name = name_loc[0].find('li').get_text().strip()
                locationNConnection = name_loc[1].find_all('li')
                location = locationNConnection[0].get_text().strip()
                try:
                    connection = locationNConnection[1].find('a').find(
                        'span').get_text().strip()
                except:
                    connection = locationNConnection[1].find(
                        'span').get_text().strip()
            except:
                return ScrapingResult('ERROR IN SCRAPING NAME')

            # Scraping the Desc (using soup)
            try:
                self.browser.execute_script(
                    "document.getElementsByClassName('lt-line-clamp__more')[0].click()"
                )
                time.sleep(loading_pause_time)
            except:
                pass

            try:
                if (self.browser.execute_script(
                        "return (els = document.getElementsByClassName('pv-oc')[0].getElementsByClassName('lt-line-clamp__line').length)"
                )):
                    profile_desc = self.browser.execute_script(
                        "return (function(){els = document.getElementsByClassName('pv-oc')[0].getElementsByClassName('lt-line-clamp__line');results = [];for (var i=0; i < els.length; i++){results.push(els[i].innerText);}return results;})()"
                    )

                else:
                    profile_desc = self.browser.execute_script(
                        "return (function(){els = document.getElementsByClassName('pv-oc')[0].getElementsByClassName('lt-line-clamp__raw-line');results = [];for (var i=0; i < els.length; i++){results.push(els[i].innerText);}return results;})()"
                    )

            except:
                profile_desc = []

            # print(profile_desc)

            # Parsing skills
            try:
                self.browser.execute_script(
                    "document.getElementsByClassName('pv-skills-section__additional-skills')[0].click()"
                )
                time.sleep(loading_pause_time)
            except:
                pass

            try:
                skills = self.browser.execute_script(
                    "return (function(){els = document.getElementsByClassName('pv-skill-category-entity');results = [];for (var i=0; i < els.length; i++){results.push(els[i].getElementsByClassName('pv-skill-category-entity__name-text')[0].innerText);}return results;})()"
                )
            except:
                skills = []

            education_list = []
            # Parsing the educations
            if len(educations) > 0:
                # Parse education entries to extract their date ranges
                educations_data_ranges = []
                x = 1
                for education in educations:
                    try:
                        # Scraping of this education entry
                        exp_section = soup.find('section',
                                                {'id': 'education-section'})
                        exp_section = exp_section.find('ul')
                        div_tags = exp_section.contents[x].find('div')
                        a_tags = div_tags.find('a')
                        x += 1

                        # Education entry - school name, degree, major, year
                        try:
                            education_name = a_tags.find(
                                'h3').get_text().strip()

                        except:
                            education_name = None

                        try:
                            education_degree_name = a_tags.find_all(
                                'p')[0].get_text().strip()
                        except:
                            education_degree_name = None

                        try:
                            education_major = a_tags.find_all(
                                'p')[1].get_text().strip()
                        except:
                            education_major = None

                        try:
                            education_year = a_tags.find_all(
                                'p')[2].get_text().strip()
                        except:
                            education_year = None

                        education_list.append(
                            Education(education_name=education_name,
                                      degree_name=education_degree_name,
                                      major=education_major,
                                      year=education_year))

                    except:
                        pass

            for x in range(3 - len(educations)):
                education_list.append(
                    Education(education_name=None,
                              degree_name=None,
                              major=None,
                              year=None))

            last_job = []
            # Parsing the job positions
            if len(job_positions) > 0:
                # Parse job positions to extract relative the data ranges
                job_positions_data_ranges = []
                x = 1
                for job_position in job_positions:
                    # Get the date range of the job position
                    try:
                        date_range_element = job_position.find_element_by_class_name(
                            'pv-entity__date-range')
                        date_range_spans = date_range_element.find_elements_by_tag_name(
                            'span')
                        date_range = date_range_spans[1].text

                        job_positions_data_ranges.append(date_range)

                        # Scraping of the last (hopefully current) Job
                        exp_section = soup.find('section',
                                                {'id': 'experience-section'})
                        exp_section = exp_section.find('ul')
                        div_tags = exp_section.contents[x].find('div')
                        a_tags = div_tags.find('a')
                        x += 1

                        # Scraping of the last Job - company_name, job_title
                        try:
                            last_job_company_name = a_tags.find_all(
                                'p')[1].get_text().strip()
                            last_job_title = a_tags.find(
                                'h3').get_text().strip()

                            spans = a_tags.find_all('span')
                        except:
                            last_job_company_name = a_tags.find_all(
                                'span')[1].get_text().strip()
                            last_job_title = exp_section.find('ul').find(
                                'li').find_all('span')[2].get_text().strip()
                            spans = exp_section.find('ul').find('li').find_all(
                                'span')

                        last_job_company_name = last_job_company_name.replace(
                            'Full-time', '').replace('Part-time', '').strip()

                        # Scraping of last Job - location
                        last_job_location = Location()
                        next_span_is_location = False
                        for span in spans:
                            if next_span_is_location:
                                last_job_location.parse_string(
                                    span.get_text().strip())
                                break
                            if span.get_text().strip() == 'Location':
                                next_span_is_location = True

                        # # Scraping of Industry related to last Job
                        # last_job_company_url = a_tags.get('href')
                        # if last_job_company_url not in self.industries_dict:
                        #     try:
                        #         self.browser.get('https://www.linkedin.com' + last_job_company_url)
                        #         self.industries_dict[last_job_company_url] = self.browser.execute_script(
                        #             "return document.getElementsByClassName("
                        #             "'org-top-card-summary-info-list__info-item')["
                        #             "0].innerText")
                        #     except:
                        #         self.industries_dict[last_job_company_url] = 'N/A'

                        # last_job_company_industry = self.industries_dict[last_job_company_url]

                        last_job.append(
                            Job(
                                position=last_job_title,
                                company=Company(
                                    name=last_job_company_name,
                                    #industry=last_job_company_industry
                                ),
                                location=last_job_location))

                    except:
                        last_job.append(
                            Job(
                                position=None,
                                company=Company(
                                    name=None,
                                    #industry=last_job_company_industry
                                ),
                                location=None))

                for x in range(4 - len(job_positions)):
                    last_job.append(
                        Job(
                            position=None,
                            company=Company(name=None,
                                            #industry=last_job_company_industry
                                            ),
                            location=None))

                print(
                    "profile_name {} \n headline {} \n location {} \n connection {} \n profile_desc {} \n email {} \n phone {} \n birthday {} \n connectedDate {} \n skills {} \n last_job {} \n last_job {} \n last_job {} \n last_job {} \n  education {} \n"
                    .format(profile_name, headline, location, connection,
                            profile_desc, email, phone, birthday,
                            connectedDate, skills, last_job[0], last_job[1],
                            last_job[2], last_job[3], education_list[0]))

                return ScrapingResult(
                    Profile(
                        profile_name, headline, location, connection,
                        connectedDate, phone, birthday, profile_desc, email,
                        skills, last_job,
                        JobHistorySummary(profile_known_graduation_date,
                                          job_positions_data_ranges),
                        education_list))

            else:
                return ScrapingResult(Profile(profile_name, email, skills))

        except HumanCheckException:

            if self.headless_option:
                raise CannotProceedScrapingException

            linkedin_logout(self.browser)

            linkedin_login(self.browser,
                           self.config.get('linkedin', 'username'),
                           self.config.get('linkedin', 'password'))

            while self.browser.current_url != 'https://www.linkedin.com/feed/':
                message_to_user('Please execute manual check', self.config)
                time.sleep(30)

            return self.scrap_profile(profile_linkedin_url,
                                      profile_known_graduation_date)
Example no. 25
    def calculate_iou(self, fov):
        polygon1 = _sort_vertices_anti_clockwise_and_remove_duplicates(self.points)
        polygon2 = _sort_vertices_anti_clockwise_and_remove_duplicates(fov.points)
        polygon3 = intersect(polygon1, polygon2)
        plot_polygon(polygon1)
        plot_polygon(polygon2)
        if len(polygon3) > 0:
            plot_polygon(polygon3)
            intersection_area = cv2.contourArea(
                np.array(polygon3).reshape(-1, 1, 2).astype(np.int32))
            fov_area = cv2.contourArea(
                np.array(polygon1).reshape(-1, 1, 2).astype(np.int32))
            print(intersection_area / fov_area)
        plt.show()

if __name__ == '__main__':
    cam1 = Transform(location=Location(x=-64, y=-126, z=4), rotation=Rotation(pitch=0, yaw=175, roll=0))
    cam2 = Transform(location=Location(x=-64, y=-120, z=4), rotation=Rotation(pitch=0, yaw=175, roll=0))
    cam3 = Transform(location=Location(x=-64, y=-126.5, z=4), rotation=Rotation(pitch=0, yaw=155, roll=0))
    cam4 = Transform(location=Location(x=-64, y=-152, z=4), rotation=Rotation(pitch=0, yaw=110, roll=0))
    fov1 = FOV(cam1)
    fov2 = FOV(cam2)
    fov3 = FOV(cam3)
    fov4 = FOV(cam4)
    polygon1 = _sort_vertices_anti_clockwise_and_remove_duplicates(fov1.points)
    polygon2 = _sort_vertices_anti_clockwise_and_remove_duplicates(fov2.points)
    polygon3 = _sort_vertices_anti_clockwise_and_remove_duplicates(fov3.points)
    polygon4 = _sort_vertices_anti_clockwise_and_remove_duplicates(fov4.points)
    plot_polygon(polygon1)
    plot_polygon(polygon2)
    polygon12 = intersect(polygon1, polygon2)
    plot_polygon(polygon12)
Example no. 26
    def parsing_jobs(self, job_positions):
        job_positions_data_ranges = []
        #array of Jobs
        Jobs_array = []

        for job_position in job_positions:
            #print('job_pos.text: {0}\n--'.format(job_position.text))
            try:
                # Get the date range of the job position
                # get the date_range
                try:
                    date_range_element = job_position.find_element_by_class_name(
                        'pv-entity__date-range')
                    date_range_spans = date_range_element.find_elements_by_tag_name(
                        'span')
                    date_range = date_range_spans[1].text
                    # print('date_range: {0}'.format(date_range))
                except NoSuchElementException:
                    date_range = "N/A"

                try:
                    # get the title
                    title_range_element = job_position.find_element_by_tag_name(
                        'h3')
                    title = title_range_element.text
                    # print('title: {0}'.format(title))
                except NoSuchElementException:
                    title = "N/A"

                try:
                    # get the companyname
                    companyname_range_element = job_position.find_element_by_class_name(
                        'pv-entity__secondary-title')
                    companyname = companyname_range_element.text.replace(
                        'Full-time', '').replace('Part-time', '').strip()
                    # print('companyname: {0}'.format(companyname))
                except NoSuchElementException:
                    companyname = "N/A"

                try:
                    # get the company page URL
                    company_url_link = job_position.find_element_by_tag_name(
                        'a').get_attribute('href')
                except NoSuchElementException:
                    company_url_link = "N/A"

                try:
                    companylocation_range_element = job_position.find_element_by_class_name(
                        'pv-entity__location')
                    companylocation_spans = companylocation_range_element.find_elements_by_tag_name(
                        'span')
                    companylocation = companylocation_spans[1].text
                except NoSuchElementException:
                    companylocation = "N/A"
                # print('companylocation: {0}'.format(companylocation))

                job_positions_data_ranges.append(date_range)
                info_company = self.get_company_data(company_url_link)
                try:
                    if info_company['companyname'] == "N/A":
                        info_company['companyname'] = companyname
                    if info_company['location'].full_string == "N/A":
                        loc = Location()
                        loc.parse_string(companylocation)
                        info_company['location'] = loc
                except:
                    print("Oops!", sys.exc_info()[0], "occured.")
                    print(info_company['industry'])
                    print(info_company['companyname'])
                    print(info_company['location'])

                trabajo_oo = Job(
                    position=title.strip(),
                    company=Company(name=info_company['companyname'].strip(),
                                    industry=info_company['industry'].strip()),
                    location=info_company['location'],
                    daterange=date_range.strip())
                Jobs_array.append(trabajo_oo)
                # print(trabajo_oo)

            except:
                print("Oops!, \n{}\n{}\n{}\noccurred.".format(
                    sys.exc_info()[0],
                    sys.exc_info()[1],
                    sys.exc_info()[2]))
                print("Job unpacking error")

        return {
            'Jobs_array': Jobs_array,
            "job_positions_data_ranges": job_positions_data_ranges
        }
Example no. 27
    def get_company_data(self, url):
        #print(url)
        no_industry = False
        if url.split("/")[3] != "company":
            print("no company page")
            return {
                'industry': 'N/A',
                'companyname': 'N/A',
                'location': Location('N/A', 'N/A', 'N/A')
            }

        if url not in self.industries_dict:
            try:
                self.browser.execute_script("window.open('');")
                self.browser.switch_to.window(self.browser.window_handles[1])
                self.browser.get(url)
            except:
                print("error opening company page")
                return {
                    'industry': 'N/A',
                    'companyname': 'N/A',
                    'location': Location('N/A', 'N/A', 'N/A')
                }
            try:
                card_summary_divs = self.browser\
                    .find_element_by_class_name('org-top-card-summary-info-list')\
                    .find_elements_by_class_name('org-top-card-summary-info-list__info-item')
                inline_divs = self.browser\
                    .find_element_by_class_name('org-top-card-summary-info-list')\
                    .find_element_by_class_name('inline-block')\
                    .find_elements_by_class_name('org-top-card-summary-info-list__info-item')
                if len(card_summary_divs) == len(inline_divs):
                    no_industry = True
                #print("card_summary_divs {}, inline_divs {}".format(len(card_summary_divs),
                #                                                    len(inline_divs)))
            except:
                print("error getting company data 3")
            #industry
            try:
                if no_industry:
                    self.industries_dict[url] = "N/A"
                else:
                    self.industries_dict[url] = self.browser.execute_script(
                        "return document.getElementsByClassName("
                        "'org-top-card-summary-info-list__info-item')["
                        "0].innerText")
            except:
                # print("industry wasn't scraped")
                self.industries_dict[url] = 'N/A'
            #companyname
            try:
                self.companies_dict[url] = self.browser.execute_script(
                    "return document.getElementsByClassName("
                    "'org-top-card-summary__title')["
                    "0].title")
            except:
                print("company name wasnt scrapped")
                self.companies_dict[url] = 'N/A'
            #locations
            try:
                if no_industry:
                    self.locations_dict[url] = self.browser.execute_script(
                        "return document.getElementsByClassName("
                        "'org-top-card-summary-info-list__info-item')["
                        "0].innerText")
                else:
                    self.locations_dict[url] = self.browser.execute_script(
                        "return document.getElementsByClassName("
                        "'org-top-card-summary-info-list__info-item')["
                        "1].innerText")
            except:
                print("location name wasnt scrapped")
                self.locations_dict[url] = 'N/A'

            try:
                self.browser.close()
                self.browser.switch_to.window(self.browser.window_handles[0])
            except:
                print("tab did not close")

        industry = self.industries_dict[url]
        companyname = self.companies_dict[url]
        location = Location()
        location.parse_string(self.locations_dict[url])

        return {
            'industry': industry,
            'companyname': companyname,
            'location': location
        }
Example no. 28
    def make_simulation_turn(self):

        if self.stopped:
            return
        # Make surrounding data available to all lawnmowers
        self.lawnmower_manager.make_scan_data_available(self.get_lawn_mdata())
        # Make surrounding data available to all puppies
        self.lawn_manager.make_scan_data_available(self.get_lawn_mdata())
        # Poll each lawnmower one by one for an action
        #print("Lawn Mdata: ", self.lawn_mdata)

        if self.polling_mowers:
            print("Polling LM: ")
            if self.mower_index == 0:
                self.turn_count += 1

            lawnmower = self.lawnmower_manager.lawnmowers[self.mower_index]

            if lawnmower.is_stalled:
                lawnmower.decrement_stalled_turns()
            else:
                self.logfile_handle.write("mower,%d\n" % (lawnmower.lawnmower_id))

                self.current_polled_obj = "lawnmower " + str(lawnmower.lawnmower_id)
                print(self.current_polled_obj)

                if self.lawn_manager.is_all_grass_cut():
                    lawnmower.turn_off()
                    self.logfile_handle.write("turn_off\n")
                    self.logfile_handle.write("ok\n")
                    self.stop_simulation()
                    return

                mowed_loc, steps = self.lawnmower_manager.poll_lawnmower(lawnmower)
                self.lawn_manager.lawn.update_mowed_grass_locations(mowed_loc)
                self.recompute_lawn_mdata()
                # Check if the Lawnmower has moved onto puppy_grass or puppy_empty, if so stall
                if len(mowed_loc):
                    if self.lawn_mdata.get(mowed_loc[0]) == const.PUPPY_EMPTY or self.lawn_mdata.get(mowed_loc[0]) == const.PUPPY_GRASS:
                        lawnmower.stall(self.collision_delay)
                if self.lawn_mdata.get(lawnmower.current_location) == const.PUPPY_MOWER:
                    self.lawnmower_manager.get_lawnmower_obj(lawnmower.lawnmower_id).stall(self.collision_delay)

                if lawnmower.is_stalled:
                    self.logfile_handle.write("stall,%d\n" %(steps))

                if lawnmower.latest_action_performed != const.SCAN:
                    self.logfile_handle.write("ok\n")

                # Again update and make surrounding data available to all lawnmowers
                self.lawnmower_manager.make_scan_data_available(self.get_lawn_mdata())


            if self.mower_index == len(self.lawnmower_manager.lawnmowers) - 1:
                # Last mower, reset
                self.polling_mowers = False
                self.mower_index = 0
                if self.is_simulation_termination_condition_met():
                    self.stop_simulation()
            else:
                self.mower_index += 1
        else:
            print("Polling puppy: ")
            puppy = self.lawn_manager.puppy_list[self.puppy_index]
            self.current_polled_obj = "puppy " + str(puppy.id)
            print(self.current_polled_obj)
            old_loc = Location(puppy.location.x, puppy.location.y)
            self.logfile_handle.write("puppy,%d\n" %(puppy.id))
            self.lawn_manager.decide_movement(puppy.id)
            self.logfile_handle.write("ok\n")
            new_loc = Location(puppy.location.x, puppy.location.y)
            # Only update the map if the puppy actually moved
            if old_loc != new_loc:
                print("Puppy Moved.....")
                loc_content_before_puppy_moves = self.lawn_mdata.get(new_loc)
                old_loc_updated_to = self.lawn_mdata.get(old_loc).split("_")[1]
                if loc_content_before_puppy_moves == const.GRASS:
                    self.lawn_mdata.update({new_loc: const.PUPPY_GRASS})
                elif loc_content_before_puppy_moves == const.EMPTY:
                    self.lawn_mdata.update({new_loc: const.PUPPY_EMPTY})
                elif loc_content_before_puppy_moves == const.MOWER:
                    self.lawn_mdata.update({new_loc: const.PUPPY_MOWER})

                self.lawn_mdata.update({old_loc: old_loc_updated_to})
                self.recompute_lawn_mdata()
                # Update and make surrounding data available to all puppies
                self.lawn_manager.make_scan_data_available(self.get_lawn_mdata())
                #print("Lawn Mdata: ", self.lawn_mdata)

            if self.puppy_index == len(self.lawn_manager.puppy_list) - 1:
                # Last puppy, reset
                self.polling_mowers = True
                self.puppy_index = 0
            else:
                self.puppy_index += 1
Example no. 29
    def execute(self, files):
        inputs = []
        props = ['synth']
        # First file
        for filename in files:
            if filename.startswith('--'):
                # Handle properties
                if filename == '--import':
                    props = ['import']
                elif filename == '--synth':
                    props = ['synth']
                elif filename == '--top':
                    props = ['synth', 'top']
                elif filename == '--tb':
                    props = ['tb']
                else:
                    fatal("unknown property '{0}'".format(filename))
                continue

            # Read the file
            fid = thin.Get_Identifier(filename.encode('utf-8'))
            fe = thin.Read_Source_File(0, fid)
            if fe == thin.No_Source_File_Entry:
                fatal('cannot open {0}'.format(filename))

            fbuf = thin.Get_File_Buffer(fe)
            flen = thin.Get_File_Length(fe)

            # Not very efficient (it copies the string), but let's use it
            # for now.
            filebuf = ctypes.string_at(fbuf, flen)

            input = RuleInput(filename, fe)
            input.filebuf = filebuf
            input.props = props
            inputs.append(input)

            if 'import' not in input.props:
                self._nbr_files += 1

                # Again, not very efficient (creates the substrings).
                flines = filebuf.splitlines(True)

                loc = Location(filename)
                for r in self._file_rules:
                    r.check(loc, flines)

        # Then tokens
        thin.Scanner.Flag_Comment.value = True
        for input in inputs:
            if 'import' not in input.props:
                thin.Scanner.Set_File(input.fe)
                filebuf = input.filebuf
                while True:
                    thin.Scanner.Scan()
                    tok = thin.Scanner.Current_Token.value
                    loc = TokLocation(input.filename,
                                      thin.Scanner.Get_Current_Line(),
                                      thin.Scanner.Get_Token_Column(),
                                      thin.Scanner.Get_Token_Position(),
                                      thin.Scanner.Get_Position())
                    if tok == tokens.Tok.Comment:
                        input.comments[loc.line] = (loc.start, loc.end)
                    for r in self._lex_rules:
                        r.check(loc, filebuf, tok)
                    if tok == tokens.Tok.Eof:
                        break
                thin.Scanner.Close_File()
        if not (self._syntax_rules or self._syntax_node_rules or
                self._sem_rules or self._sem_node_rules or self._synth_rules):
            return

        # Then syntax
        # The parser doesn't handle comments
        thin.Scanner.Flag_Comment.value = False
        # Keep extra locations
        thin.Flags.Flag_Elocations.value = True
        # Keep all parenthesis
        thin.Parse.Flag_Parse_Parenthesis.value = True
        # Be sure to initialize std and work (and only once).
        # Hmm, not very elegant.
        if thin.Get_Libraries_Chain() == thin.Null_Iir:
            thin.analyze_init()
        for input in inputs:
            thin.Scanner.Set_File(input.fe)
            loc = Location(input.filename)
            input.ast = thin.Parse.Parse_Design_File()
            if 'import' not in input.props:
                for r in self._syntax_rules:
                    r.check(input, input.ast)
                if self._syntax_node_rules:
                    for n in thinutils.nodes_iter(input.ast):
                        for r in self._syntax_node_rules:
                            r.check(loc, n)
            thin.Scanner.Close_File()

        # Then semantic
        if self._sem_rules or self._sem_node_rules or self._synth_rules:
            # Reduce Canon
            thin.Canon.Flag_Concurrent_Stmts.value = False
            thin.Canon.Flag_Configurations.value = False
            thin.Canon.Flag_Associations.value = False
            # First add all units in the work library, so that they are
            # known by the analyzer.
            for input in inputs:
                unit_ast = iirs.Get_First_Design_Unit(input.ast)
                while unit_ast != thin.Null_Iir:
                    # Detach the unit from its design file
                    next_unit_ast = iirs.Get_Chain(unit_ast)
                    iirs.Set_Chain(unit_ast, thin.Null_Iir)
                    # Add
                    thin.Add_Design_Unit_Into_Library(unit_ast, False)
                    input.units_ast.append(unit_ast)
                    unit_ast = next_unit_ast
            # Handle all units
            for input in inputs:
                if 'import' not in input.props:
                    for unit in input.units_ast:
                        if iirs.Get_Library_Unit(unit) == thin.Null_Iir:
                            # Over-written.
                            # FIXME: remove from the list ?
                            continue
                        # Be sure the unit was analyzed. It could have been
                        # already analyzed if referenced. And a unit cannot be
                        # analyzed twice.
                        if iirs.Get_Date_State(unit) == iirs.Date_State.Parse:
                            thin.Finish_Compilation(unit, False)
                            iirs.Set_Date_State(unit, iirs.Date_State.Analyze)
                        for r in self._sem_rules:
                            r.check(input, unit)
                        for n in thinutils.nodes_iter(unit):
                            for r in self._sem_node_rules:
                                r.check(input, n)

            for input in inputs:
                if 'synth' in input.props:
                    for unit in input.units_ast:
                        if iirs.Get_Library_Unit(unit) == thin.Null_Iir:
                            # Over-written.
                            continue
                        for r in self._synth_rules:
                            r.check(input, unit)
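
The execute() method above drives four rule families through a uniform check() interface: file rules receive (loc, flines), lexical rules (loc, filebuf, tok), and the syntax/semantic/synth rules an input plus an AST node. As a rough illustration of the file-rule contract only, here is a hypothetical rule compatible with the r.check(loc, flines) call; the attribute access on loc and the reporting style are assumptions, not the original API:

class LineLengthRule(object):
    """Hypothetical file-level rule: flags over-long source lines.

    Shaped to fit the r.check(loc, flines) call in execute(), where
    flines comes from filebuf.splitlines(True).
    """

    def __init__(self, max_len=80):
        self.max_len = max_len

    def check(self, loc, flines):
        for lineno, line in enumerate(flines, 1):
            # splitlines(True) keeps line terminators; drop them before
            # measuring. Lines are bytes when filebuf is a bytes object.
            ending = b"\r\n" if isinstance(line, bytes) else "\r\n"
            if len(line.rstrip(ending)) > self.max_len:
                print("{0}:{1}: line longer than {2} characters".format(
                    getattr(loc, "filename", "?"), lineno, self.max_len))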
Esempio n. 30
0
    def init_lawn_mdata(self, width, height):
        # Mark every cell of the width x height lawn as uncut grass
        for x in range(width):
            for y in range(height):
                key = Location(x, y)
                val = const.GRASS
                self.lawn_mdata.update({key: val})
Esempio n. 31
0
    def process_input(self, filename):
        # Open the default input file or an explicitly given file
        file_handle = open(filename, "r")

        # Get lawn width
        tokens = file_handle.readline().strip().split(",")
        width = int(tokens[0])
        # Check for invalid width
        if width <= 0 or width > 15:
            print ("Error: Invalid width")
            return 1

        # Get lawn height
        tokens = file_handle.readline().strip().split(",")
        height = int(tokens[0])
        # Check for invalid height
        if height <= 0 or height > 10:
            print ("Error: Invalid height")
            return 1

        # Initialize lawn_mdata for Simulator object
        self.init_lawn_mdata(width, height)

        # Get lawnmower count
        tokens = file_handle.readline().strip().split(",")
        lawnmower_count = int(tokens[0])

        # Check for invalid lawnmower count
        if lawnmower_count <= 0 or lawnmower_count > 10:
            print("Error: Invalid lawnmower count")
            return 1

        # Get the collision delay
        tokens = file_handle.readline().strip().split(",")
        self.collision_delay = int(tokens[0])

        # Check for invalid collision_delay
        if self.collision_delay < 0 or self.collision_delay > 4:
            print("Error: Invalid collision delay value")
            return 1

        lawnmower_loc = list()
        lawnmower_loc_and_dir = list()
        mowers_read = 0
        for i in range(lawnmower_count):
            line = file_handle.readline().strip()
            if not line:
                break  # Ran out of input lines
            tokens = line.split(",")
            x, y, direction = int(tokens[0]), int(tokens[1]), str(tokens[2])
            lawnmower_loc.append(Location(x, y))
            # Update associated mdata
            self.lawn_mdata.update({Location(x, y): const.MOWER})
            lawnmower_loc_and_dir.append([Location(x, y), direction])
            mowers_read += 1

        # Check that enough locations were provided for the lawnmower count
        if mowers_read < lawnmower_count:
            print("Error: Insufficient locations provided with respect to lawnmower count")
            return 1

        # Obstacle related inputs
        obstacle_list = list()
        # Crater inputs
        tokens = file_handle.readline().strip().split(",")
        crater_count = int(tokens[0])

        # Check for invalid crater count
        if crater_count < 0:
            print ("Error: Invalid crater count")
            return 1
        if crater_count != 0:
            craters_read = 0
            for i in range(crater_count):
                line = file_handle.readline().strip()
                if not line:
                    break  # Ran out of input lines
                tokens = line.split(",")
                x, y = int(tokens[0]), int(tokens[1])
                self.lawn_mdata.update({Location(x, y): const.CRATER})
                obstacle_list.append([const.CRATER, Location(x, y)])
                craters_read += 1

            # Check that enough locations were provided for the crater count
            if craters_read < crater_count:
                print("Error: Insufficient locations provided with respect to crater count")
                return 1

        # Puppy related inputs
        tokens = file_handle.readline().strip().split(",")
        puppy_count = int(tokens[0])

        # Check for the valid puppy count
        if puppy_count < 0 or puppy_count > 6:
            print ("Error: Invalid puppy count")
            return 1

        # stay_percentage is read only when there are puppies, but it is
        # used unconditionally below, so give it a default
        stay_percentage = 0

        # Make sure the puppy count is not zero before accepting inputs
        if puppy_count != 0:
            tokens = file_handle.readline().strip().split(",")
            stay_percentage = int(tokens[0])
            puppies_read = 0
            for i in range(puppy_count):
                line = file_handle.readline().strip()
                if not line:
                    break  # Ran out of input lines
                tokens = line.split(",")
                x, y = int(tokens[0]), int(tokens[1])
                if self.lawn_mdata.get(Location(x, y)) == const.GRASS:
                    self.lawn_mdata.update({Location(x, y): const.PUPPY_GRASS})
                elif self.lawn_mdata.get(Location(x, y)) == const.EMPTY:
                    self.lawn_mdata.update({Location(x, y): const.PUPPY_EMPTY})
                elif self.lawn_mdata.get(Location(x, y)) == const.MOWER:
                    self.lawn_mdata.update({Location(x, y): const.PUPPY_MOWER})
                obstacle_list.append([const.PUPPY, Location(x, y)])
                puppies_read += 1

            # Check that enough locations were provided for the puppy count
            if puppies_read < puppy_count:
                print("Error: Insufficient locations provided with respect to puppy count")
                return 1

        tokens = file_handle.readline().strip().split(",")
        max_allowed_turns = int(tokens[0])

        # Validate the turn limit before committing it to const
        if max_allowed_turns < 0 or max_allowed_turns > 300:
            print("Error: Invalid number of turns")
            return 1
        const.TURN_LIMIT = max_allowed_turns

        # Feed data to class-members of Lawn
        self.lawn_manager.create_lawn(width, height)
        obs_loc = list()
        for obs in obstacle_list:
            if obs[0] != const.PUPPY:
                obs_loc.append(obs[1])
        grass_loc = self.compute_grass_locations(width, height, obs_loc)
        self.lawn_manager.set_grass_locations(grass_loc)
        self.lawn_manager.lawn.update_mowed_grass_locations(lawnmower_loc)
        self.lawn_manager.set_stay_percentage(stay_percentage)

        # Feed data to class members of Obstacle, Puppy
        self.lawn_manager.create_obstacle_list(puppy_count, obstacle_list, stay_percentage)

        # Feed data to class members of LawnmowerManager and Lawnmower
        self.lawnmower_manager = LawnmowerManager(lawnmower_loc_and_dir, self.collision_delay)

        self.lawn_manager.set_total_grass_on_lawn()
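
For reference, the chain of readline() calls in process_input() implies an input file of the following shape. The concrete values below are illustrative, the arrow annotations are commentary rather than file content, and the stay-percentage line is present only when the puppy count is non-zero:

10          <- lawn width (1-15)
8           <- lawn height (1-10)
2           <- lawnmower count (1-10)
3           <- collision delay (0-4)
0,0,north   <- one "x,y,direction" line per lawnmower
5,5,east
1           <- crater count (>= 0)
4,4         <- one "x,y" line per crater
2           <- puppy count (0-6)
30          <- stay percentage (only when puppy count > 0)
2,2         <- one "x,y" line per puppy
7,1
200         <- maximum number of turns (0-300)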
Esempio n. 32
0
	def __init__(self, doses, path):
		self.location = Location()
		self.doses = doses
		self.path = path
Esempio n. 33
0
from utils import Location

MEAL_TYPES = ["Starter", "Soup", "Main", "Dessert"]

MEAL_CATEGORIES = ["Asian", "Thai", "Slovak", "Indian"]

OFFICE_LOCATION = Location(51.498765, -0.178749)
RESTAURANTS_TO_FETCH = 5