Example #1
0
def main():
    print(TITLE)
    print("... Starting a new webscrape ...\n")
        
    # Downloads and parses the fund list page, then collects the individual fund links.
    fund_list_url = get_url(FUND_LIST_LINK)
    fund_list_soup = BeautifulSoup(fund_list_url, 'html.parser')
    fund_links = get_fund_links(fund_list_soup)
    print(f"... Found {len(fund_links)} health fund links ...\n")

    # Creates every possible criteria combination and writes them to CRITERIA_FILE.
    create_all_criteria()
    print("... Created criteria file ...")
    crit_file = open(CRITERIA_FILE, 'r')

    for line in crit_file:
        # 1. Grabs the criteria code, dropping the trailing newline.
        line = line[:-1]
        print(f"... Scraping online for {line} criteria ...")
        cur_criteria = Criteria(list(line))
        # 2. Gets all pdf_links for cur_criteria in a 2D dictionary.
        pdfs_dict = scrape_single_criteria(cur_criteria, fund_links)
        # Creates excel sheet.
        sheet = EXCEL_SHEET + "Criteria " + line + " " + \
            datetime.datetime.now().strftime("%d %B %Y at %H.%M") + ".xlsx"
        create_excel(sheet)
        # Scrapes pdfs into sheet.
        scrape_all_pdfs(pdfs_dict, sheet)
        break  # NOTE: only the first criteria line is processed.


    crit_file.close()
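
The same per-criteria loop can be written with a context manager, so CRITERIA_FILE is closed even if a scrape raises. This is only a sketch reusing the helper names from the example above (scrape_single_criteria, create_excel, scrape_all_pdfs), not part of the original project:

# Sketch only: same loop as above, with the file handled by a context manager.
with open(CRITERIA_FILE, 'r') as crit_file:
    for line in crit_file:
        code = line.rstrip('\n')
        print(f"... Scraping online for {code} criteria ...")
        cur_criteria = Criteria(list(code))
        pdfs_dict = scrape_single_criteria(cur_criteria, fund_links)
        sheet = (EXCEL_SHEET + "Criteria " + code + " " +
                 datetime.datetime.now().strftime("%d %B %Y at %H.%M") + ".xlsx")
        create_excel(sheet)
        scrape_all_pdfs(pdfs_dict, sheet)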
Example #2
0
def main():
    print(f"\n\n                ======== {timeStamp()} :: Saaty ========\n\n")
    comment("Enter number of criteria")
    numberOfCriteria = int(get())
    criteriaArray = []
    for i in range(numberOfCriteria):
        comment(f"Enter criteria, {numberOfCriteria - i} left: ")
        cname = get("Criteria name: ")
        criteriaArray.append(Criteria(cname))
    preferenceMatrix = buildPrefrenceMatrix(criteriaArray)
    _, consistencyRatio = processPreferenceMatrix(preferenceMatrix,
                                                  criteriaArray)

    if consistencyRatio < 0.1:
        comment("Computed criteria weights are valid for use.", empty=True)
        sortCriteria(criteriaArray)
        comment("Criteria weights for the specified matrix are: ")
        for crt in criteriaArray:
            comment(f"rank: {crt.rank} - {crt.name}: {round(crt.weight,3)}",
                    empty=True)
        comment(f"with consistency ratio of: {round(consistencyRatio,4)}",
                empty=True)
    else:
        comment(
            "Computed criteria weights are not valid for use! please check priority rating.",
            empty=True)
        comment(f"with consistency ratio of: {round(consistencyRatio,4)}",
                empty=True)
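
processPreferenceMatrix is not shown here; in Saaty's AHP the consistency ratio comes from the principal eigenvalue of the pairwise comparison matrix. A minimal numpy sketch of that computation follows (function and variable names are hypothetical, not from this project, and the real implementation may differ):

# Hypothetical sketch of Saaty's consistency ratio.
import numpy as np

# Saaty's random consistency index for matrix sizes 1..10.
RANDOM_INDEX = [0.0, 0.0, 0.58, 0.90, 1.12, 1.24, 1.32, 1.41, 1.45, 1.49]

def consistency_ratio(matrix):
    """Return (weights, CR) for a square pairwise comparison matrix."""
    m = np.asarray(matrix, dtype=float)
    n = m.shape[0]
    eigenvalues, eigenvectors = np.linalg.eig(m)
    principal = np.argmax(eigenvalues.real)
    lambda_max = eigenvalues.real[principal]
    weights = np.abs(eigenvectors[:, principal].real)
    weights = weights / weights.sum()          # normalised criteria weights
    if n <= 2:
        return weights, 0.0                    # CR is defined as 0 for n <= 2
    ci = (lambda_max - n) / (n - 1)            # consistency index
    return weights, ci / RANDOM_INDEX[n - 1]   # consistency ratio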
Example #3
0
    def __init__(self):
        """Private Properties."""
        self._xbmcaddon = Xbmc_addon()  # Facade and Decorator of xbmcaddon.
        self._xbmcplugin = Xbmc_plugin()  # Facade of xbmcplugin.
        self._xbmcgui = Xbmc_gui()  # Facade and Decorator of xbmcgui.
        self._handle = int(sys.argv[1])  # Plugin's id.
        self._path = self._xbmcaddon.getAddonInfo('path')  # Path of plugin in OS. Eg.: home/[usr]/.xbmc/addons/[plugin_name]
        self._url = str(sys.argv[0])  # Plugin's URL. Eg.: plugin://[plugin_name]
        self._nav = sys.argv[2].strip('?')  # Navigation route by user through plugin. Eg.: Movies_Action
        self._media_folder = self._path + config.PLUGIN_MEDIA_FOLDER  # Media folder path.
        self._dictionaries = Dictionaries()  # Dictionaries of this plugin.
        self._criteria = Criteria()  # Criteria filter class.
        """Public Properties."""
        self.settings = Settings()  # Settings of user for this plugin.
Example #4
0
class Plugin():
    """Mega Pack Plugin"""

    def __init__(self):
        """Private Properties."""
        self._xbmcaddon = Xbmc_addon()  # Facade and Decorator of xbmcaddon.
        self._xbmcplugin = Xbmc_plugin()  # Facade of xbmcplugin.
        self._xbmcgui = Xbmc_gui()  # Facade and Decorator of xbmcgui.
        self._handle = int(sys.argv[1])  # Plugin's id.
        self._path = self._xbmcaddon.getAddonInfo('path')  # Path of plugin in OS. Eg.: home/[usr]/.xbmc/addons/[plugin_name]
        self._url = str(sys.argv[0])  # Plugin's URL. Eg.: plugin://[plugin_name]
        self._nav = sys.argv[2].strip('?')  # Navigation route by user through plugin. Eg.: Movies_Action
        self._media_folder = self._path + config.PLUGIN_MEDIA_FOLDER  # Media folder path.
        self._dictionaries = Dictionaries()  # Dictionaries of this plugin.
        self._criteria = Criteria()  # Criteria filter class.
        """Public Properties."""
        self.settings = Settings()  # Settings of user for this plugin.

    """Implementation of IPlugin interface."""

    def run(self, menu):
        """Open requested menu by user."""
        if self._nav:
            menu.open(self._nav)  # Open requested menu by user.
        else:
            menu.open(config.PLUGIN_HOME)  # Startup menu (default).
        self._xbmcplugin.endOfDirectory(self._handle)

    def get_xplugins(self, dictionaries=[], genres=[], topics=[], sports=[],
                     features=[], languages=[], countries=[]):
        """Return a sleek dictionary as filter criteria."""
        dicts = self._dictionaries.get(dictionaries, \
                    'Adult' in self.settings.get_home_menu().values())
        xplugins = copy.deepcopy(dicts)
        delete_xplugin = True  # Flag to delete xplugins without any match.
        for name, xplugin in dicts.iteritems():
            for feed, meta in xplugin.iteritems():
                if isinstance(meta, dict):
                    if not self._criteria.evaluate(self.settings, meta,
                        genres, topics, sports, features, languages,
                        countries):  # TODO: or not self._settings.validate_general(meta):
                        del xplugins[name][feed]
                    else:
                        delete_xplugin = False
            if delete_xplugin:
                del xplugins[name]
            delete_xplugin = True
        return xplugins
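
get_xplugins iterates the original dictionaries while deleting from a deep copy, which avoids mutating a dict during iteration; iteritems() marks this as Python 2 (Kodi-era) code. A rough Python 3 sketch of the same prune-a-copy pattern, with the criteria check abstracted into a hypothetical matches() callable:

# Sketch of the same prune-a-deep-copy pattern in Python 3 syntax.
# `matches` stands in for the Criteria.evaluate(...) call used above.
import copy

def filter_feeds(dicts, matches):
    """Return a copy of `dicts`, keeping only feeds whose metadata matches."""
    result = copy.deepcopy(dicts)
    for name, xplugin in dicts.items():
        kept_any = False
        for feed, meta in xplugin.items():
            if not isinstance(meta, dict):
                continue
            if matches(meta):
                kept_any = True
            else:
                del result[name][feed]
        if not kept_any:
            del result[name]     # drop plugins with no matching feeds
    return result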
Example #5
0
import csv
import pandas as pd
from criteria import Criteria
import random

def get_writer(filename):
	o = open(filename, "w")
	writer = csv.writer(o)
	writer.writerow(["State", "Year", "Age", "Gender", "Race", "Household Income"])
	return writer

def write_test_data(c, writer):

	for state in ["Michigan", "Georgia", "Ohio", "Missouri", "Delaware", "Arizona", "Indiana", "NorthCarolina", "Oregon", "Washington"] :
		for year in ["2000", "2001", "2002", "2003", "2004", "2005", "2006", "2007", "2008", "2009", "2010", "2011", "2012", "2013", "2014"]:
			hhincome = random.randint(0, 100000)
			row = [state, year, "6-16", "Female", "Black", hhincome]  # Values ordered to match the header (Age, Gender, Race).
			# print "About to write {} to disk".format(row)
			writer.writerow(row)


if __name__ == "__main__":

	c = Criteria()

	writer = get_writer("../Outputs/baby-test-file.csv")

	write_test_data(c, writer)
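
pandas is imported above but not used in this fragment, presumably for downstream analysis of the generated file. A short sketch of reading the test data back (the path is the one used in __main__; the groupby is only an illustration):

# Sketch: load the generated test file and summarise it.
import pandas as pd

df = pd.read_csv("../Outputs/baby-test-file.csv")
print(df.head())
print(df.groupby("State")["Household Income"].mean())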
Example #6
0
def train():
    use_cuda = torch.cuda.is_available() and not args.no_cuda
    device = torch.device('cuda' if use_cuda else 'cpu')

    train_data = dataset.Data(args.data_root,
                              target_pixels=args.target_pixels,
                              target_width=args.target_width,
                              target_height=args.target_height,
                              use_number=args.use_number,
                              device=device)
    train_loader = DataLoader(train_data,
                              batch_size=args.mini_batch_size,
                              shuffle=True)

    model = Model(args.encoder,
                  rotation_scale=args.rotation_scale,
                  translation_scale=args.translation_scale,
                  depth_scale=args.depth_scale)

    criteria = Criteria(depth_weight=args.depth_weight,
                        regular_weight=args.regular_weight,
                        ref_weight=args.ref_weight,
                        ground_weight=args.ground_weight,
                        scale_weight=args.scale_weight,
                        average_weight=args.average_weight,
                        down_times=args.down_times,
                        warp_flag=args.warp_flag,
                        average_depth=args.average_depth,
                        regular_flag=args.regular_flag,
                        sigma_scale=args.sigma_scale)
    pcd = o3d.geometry.PointCloud()

    model = model.to(device)
    optimiser = torch.optim.Adam(model.parameters(), lr=args.lr)

    load_dir = os.path.join('checkpoint', args.model_name)
    if args.resume > 0 and os.path.exists(load_dir):
        model.load_state_dict(torch.load(os.path.join(load_dir, 'model.pth')))
        optimiser.load_state_dict(
            torch.load(os.path.join(load_dir, 'optimiser.pth')))
        if os.path.exists(os.path.join(load_dir, 'step.pth')):
            args.step_start = torch.load(os.path.join(load_dir,
                                                      'step.pth'))['step']
        if os.path.exists(os.path.join(load_dir, 'sigma.pth')):
            sigma = torch.load(os.path.join(load_dir, 'sigma.pth'))
            criteria.previous_sigma = sigma['previous_sigma']
            criteria.next_sigma = sigma['next_sigma']

    date_time = datetime.now().strftime("_%Y_%m_%d_%H_%M_%S")
    writer = SummaryWriter(os.path.join('runs', args.model_name + date_time))
    writer.add_text('args', str(args), 0)
    model.train()
    losses = []
    data_iter = iter(train_loader)
    for step in range(args.step_start, args.step_start + args.step_number):
        try:
            data_in = next(data_iter)
        except StopIteration:
            data_iter = iter(train_loader)
            data_in = next(data_iter)
        data_out = model(data_in)
        loss = criteria(data_in, data_out)
        loss.backward()
        losses.append(loss.item())
        if step % (args.batch_size // args.mini_batch_size) == 0:
            optimiser.step()
            optimiser.zero_grad()

        if step % args.summary_freq == 0:
            loss = sum(losses) / len(losses)
            print('step:%d loss:%f' % (step, loss))
            util.visualize(data_in)
            util.visualize(data_out)
            writer.add_scalar('loss', loss, global_step=step)
            writer.add_image('image/image',
                             data_in['image'][0],
                             global_step=step)
            writer.add_image('image/color_map',
                             data_in['color_map'][0],
                             global_step=step)
            writer.add_image('image/normal',
                             data_out['normal_v'][0],
                             global_step=step)
            writer.add_text('camera',
                            str(data_out['camera'][0].data.cpu().numpy()),
                            global_step=step)
            if 'depth_v' in data_in:
                writer.add_image('image/depth_in',
                                 data_in['depth'][0],
                                 global_step=step)
            if 'depth_v' in data_out:
                writer.add_image('image/depth_out',
                                 data_out['depth'][0],
                                 global_step=step)
            if 'ground' in data_out:
                writer.add_text('ground',
                                str(data_out['ground'][0].data.cpu().numpy()),
                                global_step=step)
            for key in data_out:
                if key.startswith('base_'):
                    writer.add_image('image/' + key,
                                     data_out[key][0],
                                     global_step=step)
                elif key.startswith('image_'):
                    writer.add_image('image/' + key,
                                     data_out[key][0],
                                     global_step=step)
                elif key.startswith('residual_'):
                    writer.add_image('residual/' + key,
                                     data_out[key][0],
                                     global_step=step)
                elif key.startswith('warp_'):
                    writer.add_image('warp/' + key,
                                     data_out[key][0],
                                     global_step=step)
                elif key.startswith('grad_'):
                    writer.add_image('grad/' + key,
                                     data_out[key][0],
                                     global_step=step)
                elif key.startswith('regular_'):
                    writer.add_image('regular/' + key,
                                     data_out[key][0],
                                     global_step=step)
                elif key.startswith('record_'):
                    writer.add_image('record/' + key,
                                     data_out[key][0],
                                     global_step=step)
                elif key.startswith('ground_'):
                    writer.add_image('ground/' + key,
                                     data_out[key][0],
                                     global_step=step)
                elif key.startswith('loss'):
                    writer.add_scalar('loss/' + key,
                                      data_out[key],
                                      global_step=step)
                elif key.startswith('eval_'):
                    writer.add_scalar('eval/' + key,
                                      data_out[key],
                                      global_step=step)
                elif key.startswith('motion'):
                    writer.add_text('motion/' + key,
                                    str(data_out[key][0].data.cpu().numpy()),
                                    global_step=step)
            losses = []

        if step % args.save_freq == 0:
            save_dir = os.path.join('checkpoint', args.model_name)
            os.makedirs(save_dir, exist_ok=True)
            torch.save(model.state_dict(), os.path.join(save_dir, 'model.pth'))
            torch.save(optimiser.state_dict(),
                       os.path.join(save_dir, 'optimiser.pth'))
            torch.save({'step': step}, os.path.join(save_dir, 'step.pth'))
            torch.save(
                {
                    'previous_sigma': criteria.previous_sigma,
                    'next_sigma': criteria.next_sigma
                }, os.path.join(save_dir, 'sigma.pth'))

            points = data_out['points'][0].data.cpu().numpy()
            points = points.transpose(1, 2, 0).reshape(-1, 3)
            pcd.points = o3d.utility.Vector3dVector(points)
            colors = data_in['image'][0].data.cpu().numpy()
            colors = colors.transpose(1, 2, 0).reshape(-1, 3)
            pcd.colors = o3d.utility.Vector3dVector(colors)
            o3d.io.write_point_cloud(
                os.path.join(save_dir,
                             '%s-%010d.pcd' % (args.model_name, step)), pcd)
            print('saved to ' + save_dir)

    writer.close()
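
The `step % (args.batch_size // args.mini_batch_size)` guard implements gradient accumulation: every mini-batch calls backward(), and the optimiser only steps once enough mini-batches have contributed. A stripped-down sketch of just that pattern (model, criterion, and loader are placeholders, not this project's classes):

# Minimal gradient-accumulation sketch; model/criterion/loader are placeholders.
def accumulate_and_step(model, criterion, loader, optimiser, total_steps, accum_steps):
    model.train()
    data_iter = iter(loader)
    for step in range(total_steps):
        try:
            batch = next(data_iter)
        except StopIteration:
            data_iter = iter(loader)       # restart the loader when exhausted
            batch = next(data_iter)
        loss = criterion(model(batch), batch)
        loss.backward()                    # gradients from each mini-batch add up
        if step % accum_steps == 0:
            optimiser.step()               # apply the accumulated gradient
            optimiser.zero_grad()
    # Like the loop above, losses are summed rather than averaged over the
    # accumulation window; dividing `loss` by accum_steps before backward()
    # would average them instead.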
Example #7
0
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

from constants import *
from time import sleep
from policy import Policy
from web_scraper import *
from criteria import Criteria

browser = webdriver.Chrome()

browser.get("https://www.privatehealth.gov.au/dynamic/Insurer/Details/FAI")
nav_to_policy_page(browser)

criteria = Criteria("000000")
click_criteria(browser, criteria)

results_exist = browser.find_element_by_xpath("//div[@id='Results']/p").text
if NO_POLICIES_STR in results_exist:
    print("NO POLICIES FOUND.")
    exit(1)
# Fragment from the per-fund scraping loop: `fund` and `fund_pdfs` are defined
# by the enclosing code (not shown).
fund_pdfs[fund] = {}

# Clicks all checkboxes and adds to compare page.
checkboxes = browser.find_elements_by_xpath(
    "//input[@name='SelectedProductKeys']")
n_pols = len(checkboxes)
for box in checkboxes:
    statuses = browser.find_elements_by_xpath(
        "//td[@class='ResultColumn_Status']//span")
Example #8
0
def main():
    """Main entry point of the application"""

    #
    # Call parameter analysis
    #
    if len(sys.argv) != 3:
        print("ERREUR: Pas assez de parametres")
        usage()
        return

    if sys.argv[1][0] != '-':
        print(
            """ERREUR: Le premier parametre devrait etre "-1" ou "-2" (sans les guillemets)"""
        )
        usage()
        return

    if len(sys.argv[1]) == 1 or sys.argv[1][1] not in "13":
        print(
            """ERREUR: Le premier parametre devrait etre "-1" ou "-3" (sans les guillemets)"""
        )
        usage()
        return

    #
    # We display cos version before starting the processing
    #
    print()
    print("cos version 2.0.2")
    print()

    #
    # Read all configuration data
    #
    conf = Conf(sys.argv[2])

    #
    # Read the list of defenses
    #
    defenses = []
    f = openWithErrorManagement(key2inputFileName("defensesFilename", conf),
                                "r",
                                encoding=conf.get("encoding"))
    nbLinesRead = [0]
    name = lookForNonBlankLine(f, nbLinesRead, True, "Nom soutenance")
    while name != "":
        defenses.append(Defense(name))
        name = lookForNonBlankLine(f, nbLinesRead, True, "Nom soutenance")
    f.close()

    #
    # Read the list of students and the title of their defense
    #
    students = []
    f = openWithErrorManagement(key2inputFileName("studentsFilename", conf),
                                "r",
                                encoding=conf.get("encoding"))
    nbLinesRead = [0]

    lookForNonBlankLine(
        f, nbLinesRead, True,
        "Nom soutenance")  # We ignore the line giving the title of the columns
    studentLine = lookForNonBlankLine(f, nbLinesRead, True, "Nom soutenance")
    while studentLine != "":
        info = splitCsvLine(studentLine, conf.get("csvSeparator"))
        # We look for info in the list of defense names
        found = False
        for defense in defenses:
            if info[1] == defense.name:
                # OK, this defense name is known
                students.append(Student(info[0], defense, defenses))
                found = True
                break
        if not found:
            sys.exit(
                """ERREUR: Dans le fichier "{}", la ligne {} ("{}") fait référence à une soutenance intitulée "{}" qui n'apparaît pas dans le fichier "{}"."""
                .format(conf.get("studentsFilename"), nbLinesRead[0],
                        studentLine, info[1], conf.get("defensesFilename")))
        studentLine = lookForNonBlankLine(f, nbLinesRead, True,
                                          "Nom soutenance")
    f.close()

    #
    # Read the list of criteria types
    #
    criteriaTypes = []
    f = openWithErrorManagement(key2inputFileName("criteriaTypesFilename",
                                                  conf),
                                "r",
                                encoding=conf.get("encoding"))
    nbLinesRead = [0]
    criteriaType = lookForNonBlankLine(f, nbLinesRead, True, "Type de critère")
    while criteriaType != "":
        criteriaTypes.append(criteriaType)
        criteriaType = lookForNonBlankLine(f, nbLinesRead, True,
                                           "Type de critère")
    f.close()

    #
    # Read the list of criterias
    #
    criterias = []
    f = openWithErrorManagement(key2inputFileName("criteriasFilename", conf),
                                "r",
                                encoding=conf.get("encoding"))
    nbLinesRead = [0]

    lookForNonBlankLine(
        f, nbLinesRead, True,
        "Nom critère")  # We ignore the line giving the title of the columns
    criteriaLine = lookForNonBlankLine(f, nbLinesRead, True, "Nom critère")
    while criteriaLine != "":
        info = splitCsvLine(criteriaLine, conf.get("csvSeparator"))
        # We look for info[0] in the list of criteria types
        found = False
        for criteriaType in criteriaTypes:
            if info[0] == criteriaType:
                found = True
                break
        if not found:
            sys.exit(
                """ERREUR: Dans fichier "{}", la ligne {} ("{}") fait référence à un type de critère intitulée "{}" qui n'apparaît pas dans le fichier "{}"."""
                .format(f.name, nbLinesRead[0], criteriaLine, info[0],
                        conf.get("criteriaTypesFilename")))
        # OK, this criteriaType is known
        try:
            floatValue = float(info[2])
        except ValueError:
            sys.exit(
                """ERREUR: Dans fichier "{}", la ligne {} ("{}") a son 3ème champ ("{}") qui n'est pas un flottant."""
                .format(f.name, nbLinesRead[0], criteriaLine, info[2]))
        criterias.append(
            Criteria(info[0], info[1], floatValue, conf.get("ratioCriteriaKO"),
                     conf.get("ratioCriteriaOK")))
        criteriaLine = lookForNonBlankLine(f, nbLinesRead, True, "Nom critère")
    f.close()

    #
    # Prepare dateTime string which may be used for names of output files
    #
    date = datetime.datetime.now()
    s = str(date)
    dateTime = s[:s.find('.')]

    #
    # Remaining work depends on what the user asks for
    #
    if sys.argv[1][1] == '1':
        generateModels(conf, defenses, students, criteriaTypes, criterias,
                       dateTime)
    else:
        analyzeTeacherData(conf, defenses, criteriaTypes, criterias)
        analyzeStudentsData(conf, defenses, students, criteriaTypes, criterias)
        generateResults(conf, defenses, students, criteriaTypes, criterias,
                        dateTime)

    #
    # We display an end of execution message
    #
    if sys.argv[1][1] == '1':
        print(
            """OK, exécution de la phase {} terminée : les fichiers "{}", "{}" et "{}" ont été générés."""
            .format(
                sys.argv[1][1],
                key2ouputFileName("nominativeSheetsFilename", conf, dateTime),
                key2ouputFileName("genericSheetFilename", conf, dateTime),
                key2ouputFileName("genericTeacherMarksFilename", conf,
                                  dateTime)))
    else:
        print(
            """OK, exécution de la phase {} terminée : les fichiers "{}", "{}" et "{}" ont été générés."""
            .format(
                sys.argv[1][1],
                key2ouputFileName("synthesisCommentsFilename", conf, dateTime),
                key2ouputFileName("evaluationCommentsFilename", conf,
                                  dateTime),
                key2ouputFileName("studentsMarksSheetFilename", conf,
                                  dateTime)))
    print()

    return
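
The defenses, criteria-types, and criterias blocks above all follow the same shape: open a file, optionally skip a header line, then call lookForNonBlankLine until it returns an empty string. A sketch of that pattern factored into a generator (a hypothetical helper, reusing the lookForNonBlankLine signature shown above):

# Hypothetical helper: iterate the non-blank lines of an already-open file,
# yielding (line, lineNumber) pairs, optionally skipping a header line first.
def nonBlankLines(f, label, skipHeader=False):
    nbLinesRead = [0]
    if skipHeader:
        lookForNonBlankLine(f, nbLinesRead, True, label)
    line = lookForNonBlankLine(f, nbLinesRead, True, label)
    while line != "":
        yield line, nbLinesRead[0]
        line = lookForNonBlankLine(f, nbLinesRead, True, label)

# Example use, mirroring the criteria-types loop above:
#     for criteriaType, _ in nonBlankLines(f, "Type de critère"):
#         criteriaTypes.append(criteriaType)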