def test_company_total_employee_salary(self):
    """Total payroll must equal the sum of the individual employees' salaries."""
    company5 = Company("Apple5")
    emp13 = Employee("Michael", title = "Software Enigineer", salary = 5000, rank = 3)
    emp14 = Employee("Nick", title = "Software Enigineer", salary = 7000, rank = 2)
    company5.addEmployee(emp13)
    company5.addEmployee(emp14)
    # assertEqual instead of assertTrue(==): a failure reports both values.
    self.assertEqual(company5.getTotalSalaryofEmployee(), 12000)
def test_company_employee_count2(self):
    """Adding two employees must yield an employee count of 2."""
    company2 = Company("Apple2")
    emp10 = Employee("Jack", title = "Secretary", salary = 3000, rank = 3)
    emp11 = Employee("Kevin", title = "CTO", salary = 53000, rank = 2)
    company2.addEmployee(emp10)
    company2.addEmployee(emp11)
    # BUG FIX: `is 2` tested object identity, which is an implementation detail
    # (and a SyntaxWarning on CPython >= 3.8); compare by value instead.
    self.assertEqual(company2.getTotalNumOfEmployee(), 2)
def generate_company_with_groups(number):
    """Create `number` companies, each with a boss and a fixed Chinese org chart
    of groups, then bulk-insert staff, companies and groups into the database.

    Returns (companies, groups) — the in-memory objects that were inserted.
    """
    bosses = [Staff() for i in range(number)]
    companies = [Company() for i in range(number)]
    groups = []
    # construct company
    # Fixed department-name template (board, GM, deputy GMs, functional depts).
    gnames = ['董事会', '总经理', '副总经理', '市场总监', '技术总监', '副总经理', '财务部', '行政人事部', '商务部', '市场发展部', '客户服务部', '外部办事部', '产品研发部', '技术服务部', '系统集成部', '副总经理', '采购部', '生产部', '质检部']
    for i in companies:
        temp_groups = [Group(name=j, companyId=i._id) for j in gnames]
        # set relation: set_parent(groups, child_indices, parent_index)
        set_parent(temp_groups, [1], 0)
        set_parent(temp_groups, [2, 3, 4, 5], 1)
        set_parent(temp_groups, [6, 7, 8], 2)
        set_parent(temp_groups, [9, 10, 11], 3)
        set_parent(temp_groups, [12, 13, 14], 4)
        set_parent(temp_groups, [15, 16, 17], 5)
        groups += temp_groups
    group_data = [i.get_data_for_insert() for i in groups]
    # NOTE(review): looks like a debug leftover — prints every group row.
    print(group_data)
    for i in range(number):
        # Cross-link each boss with its company by id.
        bosses[i].companyId = companies[i]._id  # keng?? (original author's doubt note)
        companies[i].userId = bosses[i]._id
    boss_data = [i.get_data_for_insert() for i in bosses]
    company_data = [i.get_data_for_insert() for i in companies]
    staff_sql = Staff.get_insert_sql()
    group_sql = Group.get_insert_sql()
    company_sql = Company.get_insert_sql()
    database.execute_change(staff_sql, tuple(boss_data))
    database.execute_change(company_sql, tuple(company_data))
    database.execute_change(group_sql, tuple(group_data))
    return companies, groups
def test_employee_job_hopping(self):
    """Moving an employee between companies must update both head counts."""
    company3 = Company("Apple3")
    company4 = Company("Apple4")
    emp12 = Employee("Larry", title = "Software Enigineer", salary = 4000, rank = 3)
    company3.addEmployee(emp12)
    company3.removeEmployee(emp12)
    company4.addEmployee(emp12)
    # BUG FIX: `is 1` / `is 0` tested object identity (SyntaxWarning on
    # CPython >= 3.8). Two separate assertEqual calls also pinpoint which
    # company's count is wrong on failure.
    self.assertEqual(company4.getTotalNumOfEmployee(), 1)
    self.assertEqual(company3.getTotalNumOfEmployee(), 0)
def populate(self, legion_type):
    """Initialise this legion from a LegionType and build its companies.

    Loads the full legion definition via LegionHandler, then creates and
    populates `amount` companies for every company type it lists.
    """
    self.name = legion_type.name
    self.enum = legion_type
    self.legion_type = LegionHandler.load(legion_type)
    definition = self.legion_type
    for company_type, amount in definition.companies.items():
        for _ in range(amount):
            company = Company(self)
            company.populate(company_type)
            self.companies.append(company)
def download_data():
    """Ensure the local .\\data directory exists, then start one Company
    downloader thread per stock code and wait for all of them to finish."""
    # BUG FIX: the original tested os.path.exists(r".\\data\\") — a raw string
    # containing literal double backslashes, a path that never exists — and then
    # called os.makedirs(".\\data") (i.e. ".\data"), so the second run crashed
    # with FileExistsError. exist_ok=True handles both cases atomically.
    os.makedirs(r".\data", exist_ok=True)
    company_list = [Company(code) for code in Company.get_code_list()]
    # Companies are Thread subclasses: start all, then join all.
    for worker in company_list:
        worker.start()
    for worker in company_list:
        worker.join()
def generate_company(number):
    """Create `number` (Staff, Company) pairs, cross-link their ids, and
    bulk-insert both sets into the database. Returns the Company objects."""
    bosses = [Staff() for _ in range(number)]
    companies = [Company() for _ in range(number)]
    for boss, firm in zip(bosses, companies):
        boss.companyId = firm._id  # keng?? (original author's doubt note)
        firm.userId = boss._id
    boss_data = [boss.get_data_for_insert() for boss in bosses]
    company_data = [firm.get_data_for_insert() for firm in companies]
    staff_sql = Staff.get_insert_sql()
    company_sql = Company.get_insert_sql()
    database.execute_change(staff_sql, tuple(boss_data))
    database.execute_change(company_sql, tuple(company_data))
    return companies
def scrapCompanies(url):
    """Scrape the company table at `url`, record each company, and queue
    pagination links into the module-level `links` dict.

    Python 2 module (print statements, urllib2). Mutates module-level
    `links` (url -> visited flag) and `companies` (list of dicts).
    """
    print "Scrapping contents from : "+url
    links[url] = True  # mark this page as visited
    opener = urllib2.build_opener()
    opener.addheaders = [('User-agent', 'Mozilla/5.0')]  # avoid bot blocking
    page = opener.open(url)
    soup = bs4.BeautifulSoup(page,"lxml")
    trs = soup.select("#company-list table.my-table tr")
    for tr in trs:
        tds = tr.select("td")
        # Data rows have >4 cells; header/filler rows are skipped.
        if len(tds) > 4:
            company = {}
            company["name"] = tds[2].getText().strip()
            company["symbol"] = tds[3].getText().strip()
            company["sector"] = tds[4].getText().strip()
            company["detaillink"] = ""
            linkTD = tds[5]
            companyLinks = linkTD.select("a")
            if len(companyLinks) > 0:
                detailLink = companyLinks[0].attrs["href"]
                company["detaillink"] = detailLink
            scrapCompanyDetail(company)
            print company
            print "********************"
            company1 = Company(company["name"],company["symbol"],company["sector"],company["detaillink"])
            company1.displayCompany()
            companies.append(company)
            ##Delete this
            #if company["sector"] != "Sector":
            #    break;
    # Collect pagination links ("Go to Page" / "Next Page" / "Last Page")
    # that have not been visited yet.
    allLinks = soup.select("a")
    for link in allLinks:
        if 'title' in link.attrs:
            title = link.attrs["title"];
            if title.find("Go to Page") != -1 or title.find("Next Page") != -1 or title.find("Last Page") != -1:
                url = link.attrs["href"].strip()
                if url not in links:
                    links[url] = False
    # Pick the first unvisited link as the next page to crawl.
    nextUrl = ''
    for link in links:
        if links[link] == False:
            nextUrl = link
            break
def deserialize_json_to_object(self):
    """Hand-rolled line-by-line JSON reader: collect "key: value" lines between
    braces and build a Company every self.MAX_OBJECTS fields.

    Returns the list of Company objects parsed from self.__file_name.
    NOTE(review): assumes a rigid pretty-printed file layout (one field per
    line) — confirm the producer guarantees this.
    """
    raw_items = []
    companies = []
    ending_brace = False
    with open(self.__file_name, "r") as file:
        for line in file:
            # Track whether we are inside an object's braces.
            if self.LEFT_BRACE in line:
                ending_brace = False
            elif self.RIGHT_BRACE in line:
                ending_brace = True
            # Field lines only: strip JSON punctuation, split "key: value".
            if not ending_brace and self.LEFT_BRACE not in line and "[" not in line:
                raw_items.append((line.replace('\n', '').replace('"', '').replace(',', '').strip()).split(':'))
            # One full object collected — materialise it.
            if len(raw_items) == self.MAX_OBJECTS:
                company = Company()
                for iRawItem in raw_items:
                    if "State" in iRawItem:
                        company.set_state(iRawItem[1].strip())
                    elif "Company" in iRawItem:
                        company.set_company(iRawItem[1].strip())
                    elif "NumOfEmployees" in iRawItem:
                        company.set_number_of_employees(int(iRawItem[1].strip()))
                companies.append(company)
                raw_items.clear()
    return companies
def loadCompany(self, companyName):
    """Add a company to the portfolio and refresh the shared date window.

    The usable window is the intersection of all companies' histories:
    minDate is the latest start date, maxDate the earliest end date.
    """
    added = Company(companyName)
    added.loadTicker()
    self.companies.append(added)
    # reset minDate: latest start date across the portfolio
    self.minDate = max(member.loadedStartDate for member in self.companies)
    # reset maxDate: earliest end date across the portfolio
    self.maxDate = min(member.loadedEndDate for member in self.companies)
def create_company(self, data, input_file):
    """Build a Company from one flat row of spreadsheet data.

    `data` holds header fields at fixed indices followed by 15-element yearly
    series at fixed offsets. NOTE(review): `input_file` is unused in this
    body — confirm whether the interface still needs it.
    """
    ticker_temp = data[0]
    ticker_long = ticker_temp.split()
    ticker = ticker_long[0]  # short ticker: first whitespace-separated token
    bloomberg_ticker = data[0]
    name = data[1]
    industry = data[2]
    earnings_per_share = data[13:28]
    revenue_per_share = data[28:43]
    operating_income = data[43:58]
    dividends_paid = data[58:73]  # Not used. (original Swedish: "Används ej.")
    free_cash_flow = data[73:88]
    roe = data[88:103]
    roic = data[103:118]
    company_value = data[118:133]
    market_cap = data[133:148]
    price = data[148:163]
    book_value = data[163:178]  # Should be added. (original Swedish: "Borde läggas till.")
    wacc = data[178:193]
    eqy_shares_out = data[193:208]  # Should be added. (original Swedish: "Borde läggas till.")
    dividend_forecast = data[208:223]
    px_volume = data[223:238]
    dividends = data[238:253]
    # company = Company(name, bloomberg_ticker, ticker_short, earnings_per_share, revenue_per_share, dividend_forecast, dividends, free_cash_flow, roe, roic, operating_income, price, wacc, company_value, px_volume)
    company = Company(name, bloomberg_ticker, ticker, industry, earnings_per_share, revenue_per_share, dividend_forecast, dividends, free_cash_flow, roe, roic, operating_income, price, wacc, company_value, market_cap, px_volume)
    return company
def __import_companies(cls):
    """Load companies from ../Data/CompaniesData.txt into cls.companies_list.

    Records are six lines — tax id, founder name, founder surname, company
    name, address, foundation year — terminated by a '#' line. A missing or
    unreadable file is ignored (best-effort startup import).
    """
    try:
        with open('../Data/CompaniesData.txt', 'r') as f:
            record = []
            for line in f:
                line = line.strip()
                record.append(line)
                if line == '#':
                    tax_id = record[0]
                    founder_name = record[1]
                    founder_surname = record[2]
                    company_name = record[3]
                    company_address = record[4]
                    foundation_year = record[5]
                    # Single clear per record (the original cleared twice).
                    record.clear()
                    company = Company(founder_name, founder_surname,
                                      company_name, company_address,
                                      tax_id, foundation_year)
                    cls.companies_list.append(company)
    except OSError:
        # BUG FIX: the original `except BaseException: pass` also swallowed
        # KeyboardInterrupt/SystemExit and real data errors. Only a missing or
        # unreadable file is treated as best-effort now.
        pass
def scrape_companies():
    """Scrape the IDX listed-company performance table with headless Chrome,
    evaluate each company's PDF report, and return a list of Company objects."""
    companies = []
    chrome_options = Options()
    chrome_options.add_argument("--headless")
    driver = webdriver.Chrome('./driver/chromedriver.exe', options=chrome_options, service_log_path='NUL')
    driver.get(
        'https://idx.co.id/data-pasar/laporan-statistik/ringkasan-performa-perusahaan-tercatat/'
    )
    # Force the page-size <option> to a huge value so all rows load at once.
    entries_option = driver.find_element_by_xpath(
        '/html/body/main/div/div[3]/div[1]/label/select/option[4]')
    driver.execute_script("arguments[0].value = '999999';", entries_option)
    entries_combobox = Select(
        driver.find_element_by_name('performanceTable_length'))
    sleep(3)  # let the table re-render between interactions
    entries_combobox.select_by_visible_text('100')
    sleep(3)
    table = driver.find_elements_by_tag_name('tbody')[0]
    rows = table.find_elements_by_tag_name('tr')
    print("\nLoading", len(rows), "companies.")
    for i in range(len(rows)):
        cols = rows[i].find_elements_by_tag_name('td')
        company_code = cols[1].text
        company_name = cols[2].text
        pdf_link = cols[3].find_elements_by_tag_name('a')[0].get_attribute(
            'href')
        # Pull fundamentals from the linked PDF and screen them.
        der, pbv, roe, per = extract_data_from_pdf(pdf_link)
        passed = evaluate_data(der, pbv, roe, per)
        current_company = Company(i + 1, company_code, company_name, pdf_link,
                                  passed, der, pbv, roe, per)
        print(f"\n({i+1}/{len(rows)})>", current_company.to_string())
        companies.append(current_company)
    driver.quit()
    return companies
def crawl_page(skip):
    """Get urls and return companies from the page specified skip.

    Fetches one page of the Cornell career-fair listing, builds a Company per
    table row, and enriches each via parse_company.
    """
    URL = "http://ccs.career.cornell.edu/CareerFair/CarFair_SearchCode.php?link=yes&Company_Name=*&skip=" + str(skip)
    r = requests.get(URL)
    html = r.text.encode('utf-8')
    soup = soupparser.fromstring(html)
    # Skip the three header rows of the listing table.
    trs = soup.xpath("//table[@id='mytables']//table[1]/tr")[3:]
    companies = []
    for tr in trs:
        a = tr.xpath("td/a")[0]
        name, url = a.text, "http://ccs.career.cornell.edu/CareerFair/" + a.attrib['href']
        # Renamed from `id`: the original shadowed the builtin.
        company_id = get_id(url)
        company = Company(company_id)
        company.name = name
        company.url = url
        company = parse_company(company, url)
        companies.append(company)
    return companies
def loadCompaniesDict():
    """Read ./Companies.json and return a dict mapping company id -> Company."""
    # BUG FIX: the original `open(...).read()` never closed the file handle.
    with open('./Companies.json') as fp:
        companies = json.load(fp)
    currentCompanies = {}
    for entry in companies:
        currentCompany = Company(entry['id'],
                                 entry['company'],
                                 entry['city'],
                                 entry['timezone'])
        currentCompanies[currentCompany.id] = currentCompany
    return currentCompanies
def create_companies_dictionary(path_to_csv):
    """Read the CSV at `path_to_csv` and return {vendor name: Company}, with
    each row's Product/Amount added to its vendor's Company."""
    csv_data = read_csv(path_to_csv)
    companies = {}
    for _, row in csv_data.iterrows():
        vendor = row['Vendor']
        # Idiom fix: membership test directly on the dict, not .keys().
        if vendor not in companies:
            companies[vendor] = Company(vendor)
        companies[vendor].add_product(row['Product'], row['Amount'])
    return companies
def url_load():
    """Scrape the welt.de Top-500 (2014) list page and return Company objects
    enriched with their detail-page data."""
    url = "http://top500.welt.de/list/2014/U/?i=1000&e=1000&p=1"
    text = r.get(url).text
    tree = fromstring(text)
    #tree.make_links_absolute(url)
    pos = 0
    companies = []
    for cell in tqdm.tqdm(tree.find_class("grid_6")):
        anchor = cell.find(".//a")
        if anchor is None:
            continue
        pos += 1
        href = anchor.attrib["href"]
        # The last numeric path segment of the link is the detail id.
        detail_id = [int(segment) for segment in href.split("/") if segment.isdigit()][-1]
        comp = Company(str(anchor.text_content()), detail_id, pos)
        comp.set_details(get_details(detail_id))
        companies.append(comp)
    return companies
def createCompanies(self, serviceList=None):
    """Populate self.companyList with one 'Main Company' plus
    (companyCount - 1) competitors, all randomly parameterised.

    serviceList: pool of service names each company draws two entries from;
    defaults to the standard five services.
    """
    # BUG FIX: mutable default argument replaced with the None sentinel.
    if serviceList is None:
        serviceList = ["dessert", "breakfast", "coffee", "drinks", "salad"]

    def _random_company(name):
        # One randomly parameterised company (both branches of the original
        # were identical except for the name).
        # BUG FIX: the original indexed serviceList[randint(0, len) - 1],
        # which maps both -1 and len-1 to the last element, biasing the draw;
        # random.choice is uniform.
        return Company(name,
                       random.randint(10, 20),        # long
                       random.randint(10, 20),        # lat
                       [random.choice(serviceList),
                        random.choice(serviceList)],  # two offered services
                       random.randint(20, 30),        # price
                       random.randint(10, 20),        # cost
                       random.randint(400, 4000),     # sales Vol
                       random.randint(10000, 100000), # fixed cost
                       random.randint(5, 20) / 100)   # tax rate

    companyList = []
    for i in range(self.companyCount):
        name = "Main Company" if i == 0 else "Competitor{}".format(i)
        companyList.append(_random_company(name))
    # assign the class's list
    self.companyList = companyList
def company_details(url_encoded_name, year):
    """Flask view: list the production companies credited on the film
    identified by (url_encoded_name, year) and render company-details.html.

    Python 2 module (`print request.args` statement). Results are stashed in
    the module-level `cache` dict and splatted into the template context.
    """
    if app.debug:
        print request.args
    qry = """SELECT * FROM Film INNER JOIN CompanyCredits ON (CompanyCredits.film_imdblink = Film.imdblink) INNER JOIN Company ON (CompanyCredits.company_imdblink = Company.imdblink) WHERE Film.url_encoded_name = :film AND Film.year = :y;"""
    # Parameterised query via sqlalchemy text() — no string interpolation.
    cursor = g.conn.execute(text(qry), film = url_encoded_name, y = year)
    companies = []
    for result in cursor:
        companies.append(Company(result))
    cursor.close()
    cache['companies'] = companies
    # NOTE(review): companies[0] raises IndexError when the film has no
    # company credits — confirm callers guarantee at least one row.
    cache['current_film'] = companies[0].film_name
    cache['current_film_imdblink'] = companies[0].film_imdblink
    return render_template("company-details.html", **cache)
def tasks():
    """Demo driver: exercise the developer classes and the Company container
    (callable developers, += hiring, fire-by-name)."""
    hires = [
        JavaDeveloper("Jack", 4),
        PythonDeveloper("Carl", 10),
        RubyDeveloper("Nick", 1),
    ]
    for dev in hires:
        info_about_developer(dev)
    for dev in hires:
        print(dev)
    contractor = PythonDeveloper("Brad", 2)
    contractor()  # developers are callable
    print("\n\n")
    company = Company()
    for dev in hires:
        company += dev  # Company supports += for hiring
    print(company)
    company.fire("Nick")
    print(company)
def setup(self):
    """Create the agent population, run the pre-cap production round, and
    derive the emissions-allowance cap from total initial emissions."""
    for agent_id in range(self.num_agents):
        agent = Company(agent_id, self, self.strat(), self.tech(), self.market_cap())
        self.schedule.append(agent)
    initial_emissions = []
    prod = []
    for agent in self.schedule:
        initial_emissions.append(agent.produce_initial())
        prod.append(agent.prod_t)
        agent.setup()
    emissions = sum(initial_emissions)
    print("Total Production Prior to Cap: " + str(sum(prod)))
    print("Emissions Prior to Cap: " + str(emissions))
    # Cap: keep (1 - initial_cap) of the pre-cap emissions as allowances.
    self.num_allow = int((1 - self.initial_cap) * emissions)
    self.max_allow = self.num_allow
    print("Number of allowances under cap: " + str(self.num_allow))
def main():
    """CLI entry point: either run the self-test (-test) or build a Company
    from the flags and print its classification."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-test', action="store_true")
    parser.add_argument('-clean', action='store_true')
    parser.add_argument('-n', type=str)
    parser.add_argument('-s', type=str)
    parser.add_argument('-m', type=str)
    parser.add_argument('-co', type=str)
    parser.add_argument('-ci', type=str)
    parser.add_argument('-fo', type=str)
    parser.add_argument('-r', type=int)
    parser.add_argument('-ir', type=str)
    parser.add_argument('-fi', type=str)
    parser.add_argument('-li', type=str)
    parser.add_argument('-fr', type=int)
    parser.add_argument('-ft', type=float)
    parser.add_argument('-ff', type=str)
    parser.add_argument('-lf', type=str)
    parser.add_argument('-k', type=int, default=9)
    args = parser.parse_args()
    if args.test:
        test(args.k)
    else:
        if (not is_initialized()) or args.clean:
            initialize()

        def _parse_date(stamp):
            # 'YYYY?MM?DD'-shaped strings (any single-char separators, as in
            # the original fixed-offset slicing) -> datetime.date.
            # Falsy values (None / '') pass through unchanged.
            if not stamp:
                return stamp
            return datetime.date(int(stamp[:4]), int(stamp[5:7]), int(stamp[8:10]))

        # Idiom fix: the original C-style while-index loop mutating the list
        # in place is replaced by a comprehension over the same five inputs.
        date_inputs = [_parse_date(s) for s in (args.fi, args.li, args.ff, args.lf, args.fo)]
        company = Company(args.n, args.s, args.m, args.co, args.ci,
                          date_inputs[4], args.r, args.ir, date_inputs[0],
                          date_inputs[1], args.fr, args.ft, date_inputs[2],
                          date_inputs[3])
        print(classify(company, args.k))
def getCompanyInformation(self):
    """Fetch the caller's companies from the remote service.

    Returns a list of Company objects, or None when the service replies with
    an empty JSON array.
    """
    service_link = "http://tevhitkarsli.com/getCompanyInformation.php"
    myRequest = requests.post(url=service_link, auth=self.my_auth)
    if str(myRequest.text) == '[]':
        return None
    companies_text = json.loads(myRequest.text)
    all_companies_list = []
    # BUG FIX: the original loop indexed companies_text[0] on every iteration,
    # so the result was N copies of the first company.
    for record in companies_text:
        company = Company(record['CompanyID'],
                          record['CompanyName'],
                          record['CompanyNote'])
        all_companies_list.append(company)
    return all_companies_list
def add_company(cls, is_complete, founder_name, founder_surname, company_name, company_address, tax_id, foundation_year):
    """Validate the form fields, reject duplicate tax ids, persist the new
    company to CompaniesData.txt, and report the outcome via is_complete
    ("Success" / "Already Exists" / "Failed").
    """
    fields = (founder_name, founder_surname, company_name,
              company_address, tax_id, foundation_year)
    # One emptiness check instead of six repeated != '' comparisons.
    all_filled = all(field != '' for field in fields)
    company_exists, company = CompanyManagement.check_company_existence(
        tax_id)
    # Idiom fix: `not company_exists` instead of `company_exists == False`.
    if not company_exists and all_filled:
        with open('../Data/CompaniesData.txt', 'a') as f:
            company = Company(founder_name, founder_surname, company_name,
                              company_address, tax_id, foundation_year)
            CompanyManagement.companies_list.append(company)
            # Record layout: six fields followed by a '#' terminator line.
            f.write(company.get_tax_id() + '\n')
            f.write(company.get_founder_name() + '\n')
            f.write(company.get_founder_surname() + '\n')
            f.write(company.get_company_name() + '\n')
            f.write(company.get_company_address() + '\n')
            f.write(company.get_foundation_year() + '\n')
            f.write('#' + '\n')
        is_complete.set("Success")
    elif company_exists and all_filled:
        is_complete.set("Already Exists")
    else:
        is_complete.set("Failed")
def PB_C(self):
    """Calculate-button callback: validate the UI inputs, verify the Quandl
    key and ticker, run the DCF valuation via the dcffunctions module (d),
    and populate every output widget."""
    # clear all outputs
    self.clearOutputs()

    # --- input validation ---------------------------------------------------
    # BUG FIX: the original used `is not ''` identity comparisons (SyntaxWarning
    # on CPython >= 3.8, not guaranteed correct) and aborted with `raise exit`,
    # which actually raises TypeError; we compare with != '' and return early.
    if self.input_stockticker.text() != '':
        stockticker = self.input_stockticker.text()
    else:
        self.output_rec.setText('Error: Please input a stockticker.')
        return
    if self.input_forecastedGrowthRate.text() != '':
        forecasted_growth_rate = float(
            self.input_forecastedGrowthRate.text())
    else:
        forecasted_growth_rate = None
    if self.input_perpetualGrowthRate.text() != '':
        perpetual_growth_rate = float(
            self.input_perpetualGrowthRate.text())
    else:
        perpetual_growth_rate = None
    if self.input_quandl_API_key.text() != '':
        quandl.ApiConfig.api_key = self.input_quandl_API_key.text()
    else:
        self.output_rec.setText('Error: Please input an Quandl API Key.'
                                + ' Go to https://www.quandl.com/')
        return

    # test if quandl apikey is valid (up to 9 attempts)
    count = 0
    while True:
        try:
            quandl.Dataset('WIKI/AAPL').data()
            break
        except Exception:  # narrowed from bare `except:`
            count += 1
            if count > 8:
                self.output_rec.setText('Error: Invalid Quandl API key. '
                                        + 'Please check your API key '
                                        + 'and try again')
                return
    # test if stockticker is valid (up to 9 attempts)
    count = 0
    while True:
        try:
            quandl.Dataset('WIKI/' + stockticker).data()
            break
        except Exception:
            count += 1
            if count > 8:
                self.output_rec.setText('Error: Invalid stockticker.')
                return

    # initialise Company object and store 'data' attribute [dict]
    company = Company(stockticker, quandl.ApiConfig.api_key)
    data = company.data

    # --- calculations using Company data and dcffunctions -------------------
    atr = d.avg_tax_rate(data['taxexp'], data['ebt'])        # tax rate
    # change in net working capital
    cwc = d.cwc(data['ybca'], data['yeca'], data['ybcl'], data['yecl'])
    # free-cash-flow valuation
    fcf = d.fcf(data['lastebit'], atr, data['dep'], data['amor'], cwc,
                data['capex'])
    # 5-year free cash flow (years 0..5)
    if forecasted_growth_rate is None:
        fiveyearfcf = d.five_year_fcf(fcf)
    else:
        fiveyearfcf = d.five_year_fcf(fcf, forecasted_growth_rate)
    re = d.re(data['rm'], data['rf'], data['beta'])          # cost of equity
    rd = d.rd(data['intexp'], data['LTD'])                   # cost of debt
    # weighted average cost of capital
    wacc = d.wacc(data['marketcap'], data['lastD'], atr, re, rd)
    # terminal value - not part of ui, but included in calculation
    if perpetual_growth_rate is None:
        tv = d.tv(fiveyearfcf, wacc)
    else:
        tv = d.tv(fiveyearfcf, wacc, perpetual_growth_rate)
    # discounted cash flow for the current year
    dcf = d.dcf(fiveyearfcf, wacc, tv)
    # target price - not part of ui, but included in calculation
    target_price = d.target_price(dcf, data['lastD'], data['numshares'])
    # recommendation
    rec = d.recommendation(
        data['current_price'], data['stdev'], target_price)

    # --- generate the outputs ----------------------------------------------
    self.output_ebit.setText(d.convert(data['lastebit']))
    self.output_tax_rate.setText(str(round(atr, 4)))
    self.output_depr.setText(d.convert(data['dep']))
    self.output_amor.setText(d.convert(data['amor']))
    self.output_capex.setText(d.convert(data['capex']))
    self.output_cwc.setText(d.convert(cwc))
    self.output_fcf.setText(d.convert(fcf))
    self.output_y0.setText(d.convert(fiveyearfcf[0]))
    self.output_y1.setText(d.convert(fiveyearfcf[1]))
    self.output_y2.setText(d.convert(fiveyearfcf[2]))
    self.output_y3.setText(d.convert(fiveyearfcf[3]))
    self.output_y4.setText(d.convert(fiveyearfcf[4]))
    self.output_y5.setText(d.convert(fiveyearfcf[5]))
    self.output_marketcap.setText(d.convert(data['marketcap']))
    self.output_beta.setText(str(data['beta']))
    self.output_emr.setText(str(round(data['rm'], 4)))
    self.output_coe.setText(str(round(re, 4)))
    self.output_ltd.setText(d.convert(data['lastD']))
    self.output_cod.setText(str(round(rd, 4)))
    self.output_wacc.setText(str(round(wacc, 4)))
    self.output_dcf.setText(d.convert(dcf))
    self.output_rec.setText(rec)
    #insertPlainText
# NOTE(review): this excerpt begins mid-function — the two lines below are the
# tail of read_salaried_employees_from_file, whose header lies above this view.
        week_salary = int(line[2])
        empls_list.append(SalariedEmployee(name, int(line[1]), [], week_salary))
    return empls_list


def read_hourly_employees_from_file(path):
    """Parse 'name;id;rate;hours'-style lines into HourlyEmployee objects."""
    empls_list = []
    with open(path) as g:
        for line in g:
            line = line.strip()
            line = line.split(";")
            empls_list.append(HourlyEmployee(line[0], int(line[1]), [], int(line[2]), int(line[3])))
    return empls_list


if __name__ == '__main__':
    #print_lines_in_file("tasks.txt")
    #lines_list = read_lines_in_file("tasks.txt")
    #print(lines_list)
    # Build the company from tasks plus both employee files, then run a
    # work cycle and report the monthly salary total.
    tasks = read_tasks_from_file("tasks.txt")
    salaried_empls = read_salaried_employees_from_file("salariedemployees.txt")
    hourly_empls = read_hourly_employees_from_file("hourlyemployees.txt")
    empls = salaried_empls + hourly_empls
    company = Company("QuickGeo", empls, tasks)
    company.dist_task(2)
    company.work_all()
    company.print_emp()
    print(company.monty_salary)
def __init__(self, companies_id_list):
    """Build one Company per id in `companies_id_list`.

    companies_id_list: iterable of company identifiers accepted by Company().
    """
    # Idiom fix: the loop variable `id` shadowed the builtin; collapsed the
    # append loop into a comprehension.
    self.companies_list = [Company(company_id) for company_id in companies_id_list]
# NOTE(review): this excerpt starts mid-script — movie1..movie5 are created
# above this view; confirm their definitions before editing.
# Movie(title, director, genre, [actors])
upcoming_movie1 = Movie('Killers of the flower Moon','Martin Scorsese','Drama',['Robert DeNiro','Leonardo DiCaprio'])
upcoming_movie2 = Movie('Avatar 2','James Cameron','Sci-Fi',['Sam Worthington','Zoe Zaldana'])
upcoming_movie3 = Movie('Anerxomeni3','Unknown','Action',['prwtos','deuteros'])
# Seed box-office figures for the already-released movies.
movie1.updateEarnings(560341000.)
movie1.updateTickets(70042625)
movie2.updateEarnings(150000000.)
movie2.updateTickets(18750000)
movie3.updateEarnings(2790439000.)
movie3.updateTickets(348804875)
movie4.updateEarnings(345800450.)
movie4.updateTickets(43225056)
movie5.updateEarnings(230170000.)
movie5.updateTickets(28771250)
company = Company(1)
# Cinema(name, n) — presumably n is the screen/hall count; TODO confirm.
cinema1 = Cinema('Athina',20)
cinema2 = Cinema('Thesaloniki',15)
cinema3 = Cinema('Volos',10)
cinema4 = Cinema('Xalkida',2)
cinema5 = Cinema('Larisa',10)
cinema6 = Cinema('Kifisia',15)
cinema7 = Cinema('Pireas',10)
cinema8 = Cinema('Xania',5)
cinema9 = Cinema('Ioannina',8)
cinema10 = Cinema('Alexandroupoli',3)
# setPeakHours(slot, value): slot 0 means the 5 o'clock show
# (translated from the original Greeklish note "0 ennow stis5").
cinema1.setPeakHours(0,34)
cinema1.setPeakHours(1,70)
cinema1.setPeakHours(2,70)
if __name__ == "__main__":
    # Scrape every Trustpilot review page for dropbox.com and save to CSV.
    url = 'https://www.trustpilot.com/review/www.dropbox.com'
    page = requests.get(url).text
    soap = BeautifulSoup(page, 'html.parser')
    name = soap.find('span', {'class': 'multi-size-header__big'}).text.strip()
    average_rating = soap.find('p', {
        'class': 'header_trustscore'
    }).text.strip()
    # The header reads e.g. "1,234 • Excellent" — keep only the number.
    review_count = int(
        soap.find('h2', {
            'class': 'header--inline'
        }).text.strip().split('•')[0].replace(',', ''))
    review_per_page = 20
    # Ceiling division spelled out with an explicit remainder check.
    pages = review_count // review_per_page
    if review_count % review_per_page != 0:
        pages = review_count // review_per_page + 1
    company = Company(name, average_rating)
    review_scrapper = ReviewScrape()
    for page in range(1, pages + 1):
        page_url = f'{url}?languages=all&page={page}'
        company.add_reviews(review_scrapper.scrape(page_url))
        time.sleep(10)  # throttle between pages to avoid rate limiting
        print(f"page {page}")
    ReviewUtil.save(f'{company.company_name}.csv', company.reviews)
def company(self):
    """Lazily fetch and memoise the Company referenced by self.company_key."""
    cached = self._company
    if cached is None:
        cached = Company.get_by_id(self.company_key.id())
        self._company = cached
    return cached
# NOTE(review): excerpt boundaries are unclear — these prints are probably the
# body of a menu() helper defined above this view, and the trailing elif branch
# is truncated; statement order is preserved exactly.
print("\t1 - Crear Compañia")
print("\t2 - Crear Cliente")
print("\t3 - Crear Producto")
print("\t4 - Realizar Compra")
print("\t9 - salir")

# Interactive main loop: show the menu and dispatch on the chosen option.
while True:
    menu()
    opcionMenu = input("Elije una opcion : ")
    if opcionMenu == "1":
        # Option 1: create a company from user-entered name/RFC/address.
        company = Company()
        nombreCompany = input('Ingrese nombre de la compañia ').upper()
        precioCompany = input('Ingrese rfc de la compañia ')
        direccionCompany = input('Ingrese dirección de la compañia ')
        company.insertarCompany(nombreCompany, precioCompany, direccionCompany)
        input("Presione enter para continuar")
    elif opcionMenu == "2":
        # Option 2: creating a client requires at least one existing company.
        cliente = Cliente()
        company2 = Company()
        company2.fetchCompany()
        if len(company2.companyList) == 0:
            print("No puedes crear cliente sin compañias existentes")
def test_company_employee_count(self):
    """Adding one employee must yield an employee count of 1."""
    company1 = Company("Apple")
    emp9 = Employee("Ian", title = "Program Manager", salary = 7000, rank = 4)
    company1.addEmployee(emp9)
    # BUG FIX: `is 1` tested object identity (SyntaxWarning on CPython >= 3.8);
    # compare by value instead.
    self.assertEqual(company1.getTotalNumOfEmployee(), 1)
def grab_position(keyword, gj='', xl='', yx='', gx='', st='', lc='', workAddress='', city='全国'):
    """Scrape lagou.com job listings for `keyword`, building one Position and
    one Company per listing and writing both to the database.

    Python 2 module (reload(sys)/setdefaultencoding, print statement).
    Filter parameters (xl/yx/gx/st/...) map onto lagou's query string.
    """
    reload(sys)
    sys.setdefaultencoding('utf-8')  # Py2: force utf-8 default codec
    url = "http://www.lagou.com/jobs/list_" + keyword
    params = {'spc': 1, 'pl': "", 'xl': xl, 'yx': yx, 'gx': gx, 'st': st, 'labelWords': '', 'city': city, 'requestId': ""}
    list = requests.get(url, params=params)  # NOTE(review): shadows builtin `list`
    parser = BeautifulSoup(list.content)
    # print list.content
    posistions = []
    companys = []
    pos_html = parser.find_all("div", class_="hot_pos_l")
    cmp_html = parser.find_all("div", class_="hot_pos_r")
    # Left column = position cells, right column = company cells, paired by index.
    for i in range(len(pos_html)):
        p2 = BeautifulSoup(repr(pos_html[i]))
        cmp_parser = BeautifulSoup(repr(cmp_html[i]))
        p = Position()
        c = Company()
        # Scrape the position info (translated from: 抓取职位信息)
        spans = p2.find_all('span')
        p.salary = spans[1].text
        p.experience = spans[2].text
        p.edu = spans[3].text
        p.candy = spans[4].text
        p.name = pos_html[i].div.a.text
        p.place = pos_html[i].div.span.text
        p_url = pos_html[i].div.a['href']
        p_detail = requests.get(p_url)
        detail_parser = BeautifulSoup(p_detail.content)
        p.detail = detail_parser.find_all("dd", class_="job_bt")[0].text
        # Scrape the company info (translated from: 抓取公司信息)
        hot_pos_r = cmp_parser.find_all("span")
        # The company cell has 4 spans (with founder) or 3 (without).
        if len(hot_pos_r) == 4:
            c.field = hot_pos_r[0].text
            c.founder = hot_pos_r[1].text
            c.funding = hot_pos_r[2].text
            c.extent = hot_pos_r[3].text
        elif len(hot_pos_r) == 3:
            c.field = hot_pos_r[0].text
            c.funding = hot_pos_r[1].text
            c.extent = hot_pos_r[2].text
        # Link position to company via the company page id (translated from: 关联)
        tmp = cmp_parser.find_all("a")
        c_id_a = tmp[1]
        c.name = c_id_a.text
        c_id = c_id_a['href']
        c_id = re.findall('\d+', c_id)
        cmp_page = requests.get(c_id_a['href'])
        page_parser = BeautifulSoup(cmp_page.content)
        intro = page_parser.find_all("div", class_="c_intro")
        if len(intro) > 0:
            c.mainPage = intro[0].text
        else:
            c.mainPage = '暂无简介'
        p.cmp_id = c_id
        c.id = c_id
        write_to_db.add_pos(p, 'position')
        write_to_db.add_pos(c, 'company')
    print len(posistions)
# Ticker -> press-release page configuration.
company_dict = {
    "goog": pagedata.alphabet,
    "aapl": pagedata.apple,
    "brk": pagedata.berkshire_hathaway,
    "fb": pagedata.facebook,
    "ilmn": pagedata.illumina,
    "jnj": pagedata.johnson_and_johnson,
    "lin": pagedata.linde,
    "msft": pagedata.microsoft,
    "nflx": pagedata.netflix,
    "nvda": pagedata.nvidia,
    "tsla": pagedata.tesla,
    "txn": pagedata.texas_instruments,
    "ul": pagedata.unilever,
}

# Collect releases for every ticker selected on the command line (or all of
# them when --all is set).
all_structured_press_releases = []
for ticker in vars(args):
    if ticker in company_dict and (getattr(args, ticker) or args.all):
        company = Company(ticker, company_dict[ticker])
        all_structured_press_releases.extend(company.get_structured_press_releases())

# Sort by field [1] — presumably the release timestamp; TODO confirm.
all_structured_press_releases.sort(key=lambda item: item[1])
display_structured_press_releases(all_structured_press_releases, args.weeks)
# May want to use adjusted close price instead of regular close price df_goog = pd.read_excel( "C:\Justin\PYTHON\Stock Project\Historical Data Excel\GOOG_Data.xlsx" ) df_amzn = pd.read_excel( "C:\Justin\PYTHON\Stock Project\Historical Data Excel\AMZN_Data.xlsx" ) df_ford = pd.read_excel( "C:\Justin\PYTHON\Stock Project\Historical Data Excel\FORD_Data.xlsx" ) goog_company = Company( "GOOG", pd.to_datetime(df_goog["Date"]).dt.date.tolist(), df_goog["Adj Close"].tolist(), df_goog["Volume"].tolist(), ) amzn_company = Company( "AMZN", pd.to_datetime(df_amzn["Date"]).dt.date.tolist(), df_amzn["Adj Close"].tolist(), df_amzn["Volume"].tolist(), ) ford_company = Company( "FORD", pd.to_datetime(df_ford["Date"]).dt.date.tolist(), df_ford["Adj Close"].tolist(), df_ford["Volume"].tolist(),
def PB_C(self):
    """Calculate-button callback: read the UI inputs, run the DCF valuation
    via the dcffunctions module (d), and fill every output widget."""
    # getting the inputs
    stockticker = self.input_stockticker.text()
    # BUG FIX: the original used `is not ''` — an identity comparison with a
    # literal (SyntaxWarning on CPython >= 3.8, not guaranteed correct).
    if self.input_forecastedGrowthRate.text() != '':
        forecasted_growth_rate = float(
            self.input_forecastedGrowthRate.text())
    else:
        forecasted_growth_rate = None
    if self.input_perpetualGrowthRate.text() != '':
        perpetual_growth_rate = float(
            self.input_perpetualGrowthRate.text())
    else:
        perpetual_growth_rate = None
    company = Company(stockticker)
    data = company.data
    # generate the outputs
    # ebit - imported as a variable
    self.output_ebit.setText(d.convert(data['lastebit']))
    # tax rate
    atr = d.avg_tax_rate(data['taxexp'], data['ebt'])
    self.output_tax_rate.setText(str(round(atr, 4)))
    # depreciation and amortization - imported as a variable
    self.output_depr.setText(d.convert(data['dep']))
    self.output_amor.setText(d.convert(data['amor']))
    # capital expenditure - imported as a variable
    self.output_capex.setText(d.convert(data['capex']))
    # change in net working capital
    cwc = d.cwc(data['ybca'], data['yeca'], data['ybcl'], data['yecl'])
    self.output_cwc.setText(d.convert(cwc))
    # fcf valuation
    fcf = d.fcf(data['lastebit'], atr, data['dep'], data['amor'], cwc,
                data['capex'])
    self.output_fcf.setText(d.convert(fcf))
    # 5-year free cash flow (years 0..5)
    if forecasted_growth_rate is None:
        fiveyearfcf = d.five_year_fcf(fcf)
    else:
        fiveyearfcf = d.five_year_fcf(fcf, forecasted_growth_rate)
    self.output_y0.setText(d.convert(fiveyearfcf[0]))
    self.output_y1.setText(d.convert(fiveyearfcf[1]))
    self.output_y2.setText(d.convert(fiveyearfcf[2]))
    self.output_y3.setText(d.convert(fiveyearfcf[3]))
    self.output_y4.setText(d.convert(fiveyearfcf[4]))
    self.output_y5.setText(d.convert(fiveyearfcf[5]))
    # market capitalization - imported as a variable
    self.output_marketcap.setText(d.convert(data['marketcap']))
    # beta - imported as a variable
    self.output_beta.setText(str(data['beta']))
    # expected market returns - imported as a variable
    self.output_emr.setText(str(round(data['rm'], 4)))
    # cost of equity
    re = d.re(data['rm'], data['rf'], data['beta'])
    self.output_coe.setText(str(round(re, 4)))
    # total long term debt - imported as a variable
    self.output_ltd.setText(d.convert(data['lastD']))
    # cost of debt
    rd = d.rd(data['intexp'], data['LTD'])
    self.output_cod.setText(str(round(rd, 4)))
    # weighted average cost of capital
    wacc = d.wacc(data['marketcap'], data['lastD'], atr, re, rd)
    self.output_wacc.setText(str(round(wacc, 4)))
    # terminal value - not part of ui, but included in calculation
    if perpetual_growth_rate is None:
        tv = d.tv(fiveyearfcf, wacc)
    else:
        tv = d.tv(fiveyearfcf, wacc, perpetual_growth_rate)
    # discounted cash flow for the current year
    dcf = d.dcf(fiveyearfcf, wacc, tv)
    self.output_dcf.setText(d.convert(dcf))
    # target price - not part of ui, but included in calculation
    target_price = d.target_price(dcf, data['lastD'], data['numshares'])
    # recommendation
    rec = d.recommendation(data['current_price'], data['stdev'],
                           target_price)
    self.output_rec.setText(rec)
    #insertPlainText
import sys

# Make the backend module directory importable in both the deployment and the
# development checkout layouts.
sys.path.append('/app/address_book/backend/module')
sys.path.append('/zyt/python/work/work/address_book/backend/module')
from Company import Company
from Attendance import Attendance
from datetime import datetime, timedelta
from DB import DB
from db import config
from Logger import Logger

# Production file logger; the plain console logger is kept for reference.
logger = Logger(handler='file', handler_file_dir='/app/log')
# logger = Logger()
c = Company()
dbase = config.DATABASE
# These cities have no employees yet
skip_cities = []
# Marker colour per gender code (M/F/Unknown).
gender_color = {'M': 'blue', 'F': 'red', 'U': 'white'}
# Two connections: the default one and a "contact" one built from config.
db_con, db_cur = DB().connect()
test_con, test_cur = DB().connect(host=config.HOST, user=config.USERNAME, passwd=config.PASSWORD)


def is_connected_contact():
    """Ping the contact DB connection and reconnect if the ping fails."""
    global test_con, test_cur
    try:
        test_con.ping()
    except:
        test_con, test_cur = DB().connect(host=config.HOST,
# NOTE(review): excerpt truncated mid-call — the remaining connect() arguments
# lie beyond the visible source.
def mapCompany(self, object):
    """Convert a decoded-JSON mapping into a Company.

    object: mapping with keys 'id', 'name', 'address', 'city', 'country',
    'mail', 'phone'. (The parameter name shadows the builtin but is kept for
    interface compatibility.)
    """
    # Cleanup: the original aliased the parameter as a local named `json`,
    # pointlessly shadowing the json module; use the parameter directly.
    return Company(id=object['id'],
                   name=object['name'],
                   address=object['address'],
                   city=object['city'],
                   country=object['country'],
                   mail=object['mail'],
                   phone=object['phone'])
# Demo script: build a Company and populate it with clients, employees and
# products.
from Person import Person
from Order import Order
from Product import Product
from Company import Company
from Client import Client
from Employee import Employee
from datetime import datetime

company = Company()
# Client(name, phone, gender, email); Employee(name, phone, gender, salary, shift)
person1 = Client("ahmad", "0993893", "male", "*****@*****.**")
person2 = Employee("majed", "46565", "male", 5000, "12 hours")
# person3 = Person("sara", "546852", "female")
# client1 = Client("ahmad", "0993893", "male", "*****@*****.**")
# client2 = Client("majed", "5645655", "male", "*****@*****.**")
# client3 = Client("sara", "546852", "female", "*****@*****.**")
# employee1 = Employee("majed", "46565", "male", 5000, "12 hours")
# employee2 = Employee("abody", "78465", "male", 4000, "8 hours")
# employee3 = Employee("amira", "44465", "female", 3000, "4 hours")
# Product(name, price)
product1 = Product("tide", 100)
product2 = Product("fire", 50)
product3 = Product("shambow", 30)
product4 = Product("ditol", 40)
product5 = Product("dak", 21.6)