def get_near_probes_results(self):
        if self.asn != 'AS???':
            #  try to get responding probe from target ASN
            source = self.get_asn_probe(self.asn)
            results = self.ping(source)

            #  try to get a responding probe from a neighbouring ASN
            if len(results) == 0:
                asn_neighbours = AsnNeighbours(self.asn)
                asn_neighbours.run()
                if asn_neighbours.neighbours is not None:
                    for asn in asn_neighbours.neighbours:
                        source = self.get_asn_probe(asn)
                        results = self.ping(source)
                        if len(results) != 0:
                            return results

            #  if everything above fails, take a few random probes
            #  from target country and choose one with the best rtt
            if len(results) == 0:
                logging.warning(PROBE_IN_ASN_NOT_FOUND_WARNING)
                source = self.get_local_area_probes()
                results = self.ping(source)
                results = sorted(results, key=lambda x: x['avg'])[:1]
        else:
            logging.warning(PROBE_IN_ASN_NOT_FOUND_WARNING)
            source = self.get_local_area_probes()
            results = self.ping(source)
            results = sorted(results, key=lambda x: x['avg'])[:1]
        return results
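A note on the logging calls used throughout these examples: logging.warning, logging.info and logging.error are the module-level helper functions, while logging.WARN and logging.INFO are plain integer level constants used for filtering, so they are not callable. A minimal sketch:

import logging

logging.basicConfig(level=logging.INFO)  # INFO here is the level constant (20)

logging.warning("probe lookup fell back to country-level probes")  # helper function
print(logging.WARN)  # 30 -- an int; calling logging.WARN(...) raises TypeError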
Example n. 2
    def get_train_objective_info(dataloader, loss):
        try:
            if hasattr(dataloader, 'get_config_dict'):
                loader_params = dataloader.get_config_dict()
            else:
                loader_params = {}
                loader_params['batch_size'] = dataloader.batch_size if hasattr(
                    dataloader, 'batch_size') else 'unknown'
                if hasattr(dataloader, 'sampler'):
                    loader_params['sampler'] = fullname(dataloader.sampler)
                if hasattr(dataloader, 'batch_sampler'):
                    loader_params['batch_sampler'] = fullname(
                        dataloader.batch_sampler)

            dataloader_str = """**DataLoader**:\n\n`{}` of length {} with parameters:
```
{}
```""".format(fullname(dataloader), len(dataloader), loader_params)

            loss_str = "**Loss**:\n\n`{}` {}".format(
                fullname(loss), """with parameters:
  ```
  {}
  ```""".format(loss.get_config_dict())
                if hasattr(loss, 'get_config_dict') else "")

            return [dataloader_str, loss_str]

        except Exception as e:
            logging.warning(
                "Exception in get_train_objective_info: {}".format(str(e)))
            return ""
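This snippet relies on a fullname helper that is not shown. A minimal sketch of what it presumably does (return the module-qualified class name of an object); an assumption, not the library's actual implementation:

def fullname(o):
    # Assumed helper: module-qualified class name,
    # e.g. "torch.utils.data.dataloader.DataLoader".
    module = type(o).__module__
    if module is None or module == "builtins":
        return type(o).__name__
    return module + "." + type(o).__name__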
Example n. 3
def regression(data_files,
               training_fraction=0.6,
               degree=2,
               limits=None,
               normalize=(),
               ridge=False):

    data = load_data(data_files)
    N = len(data)
    logging.info(" >> Loaded %d data points" % N)

    dataset = data
    data = preprocess_data(data, limits=limits, normalize=normalize)
    logging.info(
        " >> Using %d / %d data points after preprocessing (deleted %d points)"
        % (len(data), N, N - len(data)))

    data = extract_data_set(data)
    data[0] = polynomialize_data(data[0])
    data, training_data, test_data = split_data_set(
        data, training_fraction=training_fraction)

    if not (data and training_data and test_data):
        logging.warning(" >> Did not have enough data to do regression")
        return

    train_size, test_size = training_data[0].shape[0], test_data[0].shape[0]
    logging.info(" >> Training on %d, testing on %d" % (train_size, test_size))

    reg_mod = linear_regression(*training_data, ridge=ridge)

    reg_res = evaluate(training_data, test_data, reg_mod, degree=degree)
    return dataset, (data, training_data, test_data, reg_mod), reg_res
Example n. 4
 async def update(self):
     args = list(map(self.getValue, self.__fields__))
     args.append(self.getValue(self.__primary_key__))
     rows = await execute(self.__update__, args)
     if rows != 1:
         logging.warning('failed to update by primary key: affected rows: %s'
                         % rows)
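The model methods here assume an execute coroutine that runs a statement and returns the affected row count. A minimal aiomysql-style sketch, with the connection pool passed in explicitly (the original presumably closes over a module-level pool; the names below are assumptions):

async def execute(pool, sql, args):
    # pool: an aiomysql connection pool (assumed created elsewhere).
    # Run an INSERT/UPDATE/DELETE and return the number of affected rows.
    async with pool.acquire() as conn:
        async with conn.cursor() as cur:
            await cur.execute(sql.replace('?', '%s'), args or ())
            affected = cur.rowcount
        await conn.commit()
        return affected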
Example n. 5
def main():

    # Default config file and profile
    config = from_file()
    # Create artifact
    artifacts_client = ArtifactsClient(config)

    # Upload Image and Signature Flow
    kms_key_id = "ocid1.key.oc1..exampleuniqueID"
    kms_key_version_id = "ocid1.keyversion.oc1..exampleuniqueID"
    signing_algo = "SHA_512_RSA_PKCS_PSS"
    compartment_id = "ocid1.compartment.oc1..exampleuniqueID"
    image_id = "ocid1.containerimage.oc1..exampleuniqueID"
    description = "Image built by TC"
    metadata = "{\"buildNumber\":\"123\"}"

    signature = sign_and_upload_container_image_signature_metadata(
        artifacts_client, config, kms_key_id, kms_key_version_id, signing_algo,
        compartment_id, image_id, description, metadata)
    logging.info("A signature has been successfully uploaded: %s", signature)

    # Pull Image and Verify Signature Flow
    repo_name = "repo-name"
    trusted_keys = ["ocid1.key.oc1..keyId1", "ocid1.key.oc1..keyId2"]
    image_digest = "sha256:12345"

    verified = get_and_verify_image_signature_metadata(artifacts_client,
                                                       compartment_id, False,
                                                       repo_name, image_digest,
                                                       trusted_keys)
    if verified:
        logging.info("At least one of the signatures is verified")
    else:
        logging.warning("None of the signatures could be verified")
Example n. 6
 def getInstance(self):
     if self.activation_type in self.actication_dicts:
         logging.info("using activation %s" % self.activation_type)
         return self.actication_dicts[self.activation_type]
     else:
         logging.warning("activation param is invalid.")
         return self.swish
Example n. 7
 def traiterUnHeader(self, headers):
     listeHeaders = []
     dicoRowspans = {}
     try:
         cellules = headers[0].findChildren(recursive=False)
         decalageColspans = 0
         for j in range(len(cellules)):
             if not cellules[j].has_attr('colspan'):
                 if not cellules[j].has_attr('rowspan'):
                     # Case with neither colspan nor rowspan.
                     listeHeaders.append(cellules[j].text.strip())
                 else:
                     # Case with a rowspan but no colspan.
                     dicoRowspans[j + decalageColspans] = Rowspan(int(cellules[j].attrs.get('rowspan')), cellules[j].text.strip())
                     listeHeaders.append(cellules[j].text.strip())
             else:
                 if not cellules[j].has_attr('rowspan'):
                     # Case with a colspan but no rowspan.
                     for k in range(int(cellules[j].attrs.get('colspan'))):
                         listeHeaders.append(cellules[j].text.strip())
                     decalageColspans += int(cellules[j].attrs.get('colspan')) - 1
                 else:
                     # Case with both a rowspan and a colspan.
                     for l in range(int(cellules[j].attrs.get('colspan'))):
                         listeHeaders.append(cellules[j].text.strip())
                         dicoRowspans[j + l + decalageColspans] = Rowspan(int(cellules[j].attrs.get('rowspan')), cellules[j].text.strip())
                     decalageColspans += int(cellules[j].attrs.get('colspan')) - 1
         return listeHeaders, dicoRowspans
     except Exception:
         logging.warning("This header could not be processed. Processing will continue.")
         return listeHeaders, dicoRowspans
Example n. 8
    def extract_vehicle_data(ad_link):

        try:
            img_url_list = []

            r = requests.get(ad_link)

            if r.status_code == 200:
                # Scrape Advertisement Heading
                container_main = SoupStrainer('div', {'class': re.compile('.*v-item__header.*')})
                title_soup = BeautifulSoup(r.content, features="lxml", parse_only=container_main)

                item_top = title_soup.find('h2')

                ad_title = "unknown"
                if item_top is not None:
                    ad_title = item_top.select('span')[0].text.strip()

                # Extract Brand
                brand = "unknown"
                model = "unknown"
                if ad_title != "unknown":
                    try:
                        brand_model = ad_title.split(',')[0]
                        brand_model_split = brand_model.split(' ', 1)
                        brand = brand_model_split[0].strip()
                        model = brand_model_split[1].strip()
                    except Exception:
                        brand = "unknown"
                        model = "unknown"
                        logging.warning("Splitting ad title failed for %s" % ad_title)

                # Scrape Vehicle Info
                item_table = SoupStrainer('table', {'class': re.compile('.*v-item-table.*')})
                item_table_soup = BeautifulSoup(r.content, features="lxml", parse_only=item_table)

                # Extract Model_year
                model_year_elem = item_table_soup.find('td', text=re.compile("Release Year", re.I))
                model_year = "unknown"
                if model_year_elem is not None:
                    model_year = model_year_elem.find_next_sibling("td").find('a').text.strip()

                # Extract Body_Type
                body_type_elem = item_table_soup.find('td', text=re.compile("Body Type"))
                body_type = "unknown"
                if body_type_elem is not None:
                    body_type = body_type_elem.find_next_sibling("td").find('a').text.strip()

                # Scrape All The Image URLS
                gallery_items = SoupStrainer('div', {'class': re.compile('.*v-item__gallery.*')})
                gallery_soup = BeautifulSoup(r.content, features="lxml", parse_only=gallery_items)
                for a in gallery_soup.findAll('a', {'class': 'v-item__gallery__item'}):
                    src_url = a.get('href')
                    img_url_list.append("https:" + src_url)

                return Vehicle(ad_link, ad_title, brand, model, model_year, body_type, img_url_list)
        except Exception:
            logging.error("Error occurred while scraping data from %s" % ad_link)
            return None
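A minimal usage sketch; the URL is a placeholder, and the Vehicle attribute names are assumed to mirror its constructor arguments:

vehicle = extract_vehicle_data("https://example.com/vehicle-ad/12345")  # placeholder URL
if vehicle is not None:
    print(vehicle.brand, vehicle.model, vehicle.model_year, len(vehicle.img_url_list))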
Example n. 9
    def database(self):
        QDatabaseDialog = QDialog()
        Interface = Ui_Database(QDatabaseDialog)
        try:
            Interface.table.itemClicked.connect(self.openBrowserOnClick)
        except Exception as E:
            logErrors.warning(f"Error: {E}")
        Interface.table.setRowCount(13)

        try:
            userData = Decrypter.getData()
            tableRow = 0
            # the number of items in the database
            itemCount = len(userData)
            if itemCount > 13:  # set the row count to the number of items
                Interface.table.setRowCount(itemCount)

            for item in userData:
                databaseKey = userData[item]
                Interface.table.setItem(tableRow, 0,
                                        QTableWidgetItem(databaseKey['title']))
                Interface.table.setItem(tableRow, 1,
                                        QTableWidgetItem(databaseKey['email']))
                Interface.table.setItem(
                    tableRow, 2, QTableWidgetItem(databaseKey['password']))
                Interface.table.setItem(
                    tableRow, 3, QTableWidgetItem(databaseKey['message']))
                tableRow += 1
        except Exception as E:
            logErrors.info(f"Database Error: {E}")
            QMessageBox.information(self, "Information",
                                    "You have no data \nin your database")

        QDatabaseDialog.show()
        QDatabaseDialog.exec_()
Example n. 10
    def extract(self, h_info):
        """
        extract and add features for surfaces and entities in h_info['spot']
        :param h_info: spot->field->[h_sf_info] h_sf_info['entities'] = [h_e_info]
        :return: packed into h_info
        """
        assert self.resource is not None

        if SPOT_FIELD not in h_info:
            logging.warning('spot field not found in h_info')
            return h_info
        h_new_info = deepcopy(h_info)
        h_spotted_field = h_info[SPOT_FIELD]
        h_grounded_field = {}
        for field in self.l_target_fields:
            l_h_sf_info = h_spotted_field.get(field, [])

            l_h_sf_info_with_feature = []
            for h_sf_info in l_h_sf_info:
                h_sf_info['field'] = field
                h_sf_feature = self.extract_for_surface(h_sf_info, h_info)
                h_sf_info['f'] = h_sf_feature
                l_h_e_info = h_sf_info.get('entities', [])
                l_h_e_info_with_feature = []
                for h_e_info in l_h_e_info:
                    h_e_feature = self.extract_for_entity(h_e_info, h_sf_info, h_info)
                    h_e_info['f'] = h_e_feature
                    l_h_e_info_with_feature.append(h_e_info)
                h_sf_info['entities'] = l_h_e_info_with_feature
                l_h_sf_info_with_feature.append(h_sf_info)
            h_grounded_field[field] = l_h_sf_info_with_feature
        del h_new_info[SPOT_FIELD]
        h_new_info[GROUND_FIELD] = h_grounded_field
        return h_new_info
Example n. 11
    def __init__(self, metadata: dict):

        try:

            if not all(param in metadata for param in self.min_parameters):
                raise JsonMatchException(
                    "Json specification should have all required keys ",
                    ",".join(metadata.keys()))
            else:
                self.columns = metadata["ColumnNames"]
                self.offsets = metadata["Offsets"]
                if "FixedWidthEncoding" not in metadata:
                    logging.warning('"FixedWidthEncoding" does not exist. '
                                    'Default encoding "windows-1252" has been applied')

                self.fixedWidthEncoding = metadata.get("FixedWidthEncoding",
                                                       "windows-1252")

                if "DelimitedEncoding" not in metadata:
                    logging.warning('"DelimitedEncoding" does not exist. '
                                    'Default encoding "utf-8" has been applied')
                self.delimitedEncoding = metadata.get("DelimitedEncoding",
                                                      "utf-8")

                if "IncludeHeader" not in metadata:
                    logging.warning('"IncludeHeader" does not exist. '
                                    'Header is not included in the file')

                self.include_header: bool = ast.literal_eval(
                    metadata.get("IncludeHeader", "False"))

                if len(self.columns) != len(self.offsets):
                    raise JsonMatchException(
                        "Does not have matching offsets or columns", "")
                else:
                    self.columnOffsets = dict(
                        zip(self.columns, [*map(int, self.offsets)]))
        except JsonMatchException as ex:
            raise ex
        except KeyError as ex:
            logger.exception(ex)
            raise JsonMatchException(str(ex))
        except Exception as ex:
            logger.exception(ex)
            raise ex
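A sketch of the metadata dict this constructor expects; the owning class name FixedWidthSpec is hypothetical, since only __init__ is shown above:

metadata = {
    "ColumnNames": ["id", "name", "amount"],
    "Offsets": ["4", "20", "8"],
    "IncludeHeader": "True",  # parsed with ast.literal_eval
    # "FixedWidthEncoding" omitted -> defaults to "windows-1252" with a warning
}
spec = FixedWidthSpec(metadata)  # hypothetical class name
print(spec.columnOffsets)        # {'id': 4, 'name': 20, 'amount': 8}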
Example n. 12
def get_solutions(path):
    files = []
    for f in listdir(path):
        try:
            with open(join(path, f), 'rb') as raw_file:
                data = pickle.load(raw_file)
            files.append(data)
        except (pickle.UnpicklingError, TypeError):
            logging.warning(f"File {f} could not be unpickled. SKIP")
    return files
Example n. 13
 def is_login(self, user=account_config.get("default_user")):
     url = self.url_head + '/user/%s' % user
     logging.info("[current login page]: {}".format(url))
     res = self.s.get(url)
     if "资料" in res.text:  # "资料" ("profile") only appears when logged in
         logging.info("login succeeded")
         return True
     else:
         logging.warning("login failed")
         return False
Example n. 14
def download_filterlist():
    try:
        r = requests.get(
            "https://raw.githubusercontent.com/AdguardTeam/AdguardFilters/master/SpywareFilter/sections/tracking_servers.txt"
        )
        path = os.path.join(os.getcwd(), 'resources/adguard_default.txt')
        with open(path, 'w') as f:
            f.write(codecs.decode(r.content, 'utf-8'))
    except Exception:
        logging.warning("Unable to update filterlist, using local version")
Example n. 15
def get_hostname(ip):
    """
    Get the FQDN from an IP
    :param ip: The ip address to scan
    :return: The FQDN name
    """
    try:
        return socket.getfqdn(ip)
    except Exception:
        logging.warning("Could not get the FQDN for {}".format(ip))
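A quick usage example; the reverse lookup result depends on the resolver, so the output shown is indicative only:

print(get_hostname("8.8.8.8"))  # typically prints "dns.google"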
Example n. 16
 def _convert_value_to_enum(self, value):
     if value is not None:
         value_map = self.get_enums_by_value_map(self.enum)
         if value in value_map:
             return value_map[value]
         else:
             logging.warning(
                 'unexpected enum value: {} not found in enum {}'.format(
                     value, self.enum))
             return value
     else:
         return None
Example n. 17
 def traiterHeaders(self, lignes, largeurTableau, writer):
     try:
         headers = []
         nombreHeaders = 0
         while self.isHeader(lignes[nombreHeaders]):
             headers.append(lignes[nombreHeaders])
             nombreHeaders += 1
         if nombreHeaders > 0:
             self.ecrireHeaders(headers, largeurTableau, writer)
         return nombreHeaders
     except Exception:
         logging.warning("This header could not be processed. Processing will continue.")
         return nombreHeaders
Example n. 18
def load_single(dirname, files):
    print("Dir", dirname)
    tags = []
    for one in files:
        if one.endswith("mp3"):
            f = eyed3.load(os.path.join(dirname, one))
            tags.append(f)

    releases = [t.tag.album for t in tags]
    artists = [t.tag.artist for t in tags]

    if len(set(releases)) > 1:
        logging.warning("Found more than 1 release name")
    if len(set(artists)) > 1:
        logging.warning("Found more than 1 artist name")

    r = releases[0]
    a = artists[0]
    y = str(tags[0].tag.recording_date)
    rel = Release(title=r, artist=a, year=y)
    rel.save()
    for tr in sorted(tags, key=lambda tg: int(tg.tag.track_num[0])):
        dsc = tr.tag.disc_num[0]
        if not dsc:
            dsc = 1
        track = Track(release=rel,
                      name=tr.tag.title,
                      position=tr.tag.track_num[0],
                      disc=dsc,
                      length=tr.info.time_secs * 1000,
                      filename=tr.path)
        if "/Ajay/" in tr.path:
            rel.who = "Ajay"
        else:
            rel.who = "Sankalp"
        track.save()
    rel.save()
Example n. 19
 def ecrireHeaders(self, headers, largeurTableau, writer):
     try:
         if len(headers) == 1:
             listeHeaders1 = self.traiterUnHeader(headers)[0]
             writer.writerow(listeHeaders1)
         elif len(headers) == 2:
             listeHeaders2 = self.traiterDeuxHeaders(headers)
             writer.writerow(listeHeaders2)
         else:
             dicoRowspans = {}
             for i in range(len(headers)):
                 ligne = headers[i]
                 dicoRowspans = self.traiterLigne(dicoRowspans, ligne, largeurTableau, writer)
     except Exception:
         logging.warning("This header could not be written. Processing will continue.")
Example n. 20
    def get_all_instances(self, instance_type, subdir=""):
        """
        Utility function returning all data in the directory or subdirectory of an instance type.

        Note:
            This function is a generator!
            That means you can iterate over it, and it yields one object at a time.
            It also means it is single-pass: you can only iterate over it once.
        """

        def get_object(directory):
            """
            Generator object to return the file paths
            Use generator to avoid memory issues with large directories
            """
            path = join(self.base_path, directory)
            for f in listdir(path):
                if isfile(join(path, f)):
                    yield "file", f
                else:
                    yield "subdir", join(directory, f)

        # 1) Check if the data type is correct
        try:
            inst_code = self.code_lookup[instance_type]
        except KeyError:
            raise ValueError("instance_type is not known")

        data_path = join(inst_code, "data", subdir)

        # 2) Get all the data in the directory and subdirectories
        sub_dirs = [data_path]
        while sub_dirs:
            new_subdir = sub_dirs.pop()
            for f_type, f in get_object(new_subdir):

                if f_type == "file":

                    # try to load the content
                    try:
                        with open(join(self.base_path, new_subdir, f), 'rb') as fh:
                            file_obj = pickle.load(fh)
                        yield {"name": f, "content": file_obj}
                    except (pickle.UnpicklingError, TypeError):
                        logging.warning(f"File {f} could not be unpickled. SKIP")

                else:
                    sub_dirs += [f]
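A consumption sketch; the owning object repo and the instance type "results" are assumptions:

# Single-pass iteration; call get_all_instances again to start over.
for entry in repo.get_all_instances("results", subdir="2021"):
    print(entry["name"], type(entry["content"]))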
Example n. 21
        def compute_answer():
            try:
                content_link = "https://raw.githubusercontent.com/hexUniverse/postergirl/master/content.txt"
                tmp = requests.get(content_link).text
            except Exception as e:
                with open('content.txt', 'r', encoding='utf8') as fallback:
                    tmp = fallback.read()
                logging.warning(e)
            random.seed(query_string)
            sayList = tmp.split(',\n')[0:-1]
            articles = [{
                'type': 'article',
                'id': 'id',
                'title': '虎虎?',
                'message_text': random.choice(sayList)
            }]

            return articles, 0
Example n. 22
    def getData(self, window, loggingIn, dialog):
        '''
        Gets the input data from the form.
        * If the user was setting up their account, the
        * data will be encrypted and then stored.
        * Otherwise, if the user was logging in, it will
        * first verify the user.
        '''
        if loggingIn:  # checks if the user is in the SetupForm or the Login Form
            data = Decrypter.getData('.Data/.passwordKey',
                                     '.Data/.encryptedPassword')
            Password = window.Password.text()
            if data["password"] == Password:
                self.correctPassword = True
            else:
                self.correctPassword = False
                QMessageBox.information(self, "Error", "Wrong Password")

            # if the user enters a wrong password,
            # the program does not exit; on a correct
            # password, the dialog closes and the
            # Wallet's main interface is set up
            if self.correctPassword:
                dialog.close()
                try:
                    self.setupUi(self)
                except Exception as e:
                    # logs any error to the log file
                    logErrors.warning(f"Could not Setup MainWindow. Warning: {e}")

        else:
            password = window.Password.text()
            username = window.Username.text()
            data = {'username': username, 'password': password}
            Encrypter.Encrypt(data, '.Data/.passwordKey',
                              '.Data/.encryptedPassword')
            loggingIn = True
            dialog.close()
Example n. 23
 def extraire(self, url):
     listeTableaux = []
     try:
         logging.debug("Starting to extract the tables from the following url: " + url)
         wurl = self.BASE_WIKIPEDIA_URL + url
         page = urlopen(wurl)
         file = page.read()
         document = BeautifulSoup(file, features="html.parser")
         tableaux = document.find_all('table', {'class': 'wikitable'})
         logging.debug("There are " + str(len(tableaux)) + " tables in this url.")
         for i in range(len(tableaux)):
             tableau = tableaux[i]
             self.traiterTableau(tableau, url, i)
             listeTableaux.append(tableau)
         logging.debug("Finished extracting the tables from the following url: " + url)
         return False, listeTableaux
     except Exception:
         logging.warning("This url could not be processed. Processing will continue.")
         return False, listeTableaux
Example n. 24
 def traiterLigne(self, dicoRowspans, ligne, largeurTableau, writer):
     try:
         cellulesLigneCourante = ligne.findChildren(recursive=False)
         retour = {}
         listeCellulesAEcrire = []
         compteurAnciensRowspansTraites = 0
         decalageColspans = 0
         for i in range(largeurTableau):
             if i + decalageColspans >= largeurTableau:
                 writer.writerow(listeCellulesAEcrire)
                 return dicoRowspans
             if i in dicoRowspans.keys():
                 rowspan = dicoRowspans[i]
                 listeCellulesAEcrire.append(rowspan.texte)
                 rowspan.rowspanResiduel = rowspan.rowspanResiduel - 1
                 if rowspan.rowspanResiduel > 0:
                     retour[i] = rowspan
                 compteurAnciensRowspansTraites += 1
             else:
                 if len(cellulesLigneCourante) > (i - compteurAnciensRowspansTraites):
                     cellule = cellulesLigneCourante[i - compteurAnciensRowspansTraites]
                     if not cellule.has_attr('colspan'):
                         if not cellule.has_attr('rowspan'):
                             listeCellulesAEcrire.append(cellule.text.strip())
                         else:
                             dicoRowspans[i + compteurAnciensRowspansTraites] = Rowspan(int(cellule.attrs.get('rowspan')), cellule.text.strip())
                             listeCellulesAEcrire.append(cellule.text.strip())
                     else:
                         if not cellule.has_attr('rowspan'):
                             for j in range(int(cellule.attrs.get('colspan'))):
                                 listeCellulesAEcrire.append(cellule.text.strip())
                             decalageColspans += int(cellule.attrs.get('colspan')) - 1
                         else:
                             for k in range(int(cellule.attrs.get('colspan'))):
                                 listeCellulesAEcrire.append(cellule.text.strip())
                                 dicoRowspans[i + k + decalageColspans] = Rowspan(int(cellule.attrs.get('rowspan')), cellule.text.strip())
                             decalageColspans += int(cellule.attrs.get('colspan')) - 1
                 else:
                     listeCellulesAEcrire.append("")
         writer.writerow(listeCellulesAEcrire)
         return dicoRowspans
     except Exception:
         logging.warning("This line could not be processed. Processing will continue.")
         return dicoRowspans
Example n. 25
 def traiterDeuxHeaders(self, headers):
     listeHeaders2 = []
     try:
         retourTraiterUnHeader = self.traiterUnHeader(headers)
         listeHeaders1 = retourTraiterUnHeader[0]
         dicoRowspans = retourTraiterUnHeader[1]
         listeHeaders2 = []
         nouvelleMapRowspans = {}
         idecale = 0
         cellules2 = headers[1].findChildren(recursive=False)
         for i in range(len(listeHeaders1)):
             if i in dicoRowspans.keys():
                 listeHeaders2.append(listeHeaders1[i])
                 idecale -= 1
             else:
                 listeHeaders2.append(listeHeaders1[i].strip() + " " + cellules2[idecale].text.strip())
             idecale += 1
         return listeHeaders2
     except Exception:
         logging.warning("This header could not be processed. Processing will continue.")
         return listeHeaders2
Example n. 26
 def login_c(self, user=None, passwd=None):
     if not (user and passwd):
         user, passwd = self.user, self.passwd
     url = url_config.get("login")
     try:
         r = self.s.get(url)
         csrf_token = re.findall(r'csrf_token.*?value="(.*?)">-', r.text)[0]
         logging.info("[page csrf_token]: {}".format(csrf_token))
     except Exception:
         logging.warning("failed to fetch the page csrf_token")
         return
     data = {
         "username": user,
         "password": passwd,
         "submit": "Login",
         "csrf_token": csrf_token
     }
     r2 = self.s.post(url, data=data)
     return r2.text
Example n. 27
def kalkulator(działanie, pierwsza_liczba, druga_liczba):

    if działanie == 1:
        logging.info(f"Adding {pierwsza_liczba} and {druga_liczba}")
        wynik = pierwsza_liczba + druga_liczba

    elif działanie == 2:
        logging.info(f"Subtracting {druga_liczba} from {pierwsza_liczba}")
        wynik = pierwsza_liczba - druga_liczba

    elif działanie == 3:
        logging.info(f"Multiplying {pierwsza_liczba} by {druga_liczba}")
        wynik = pierwsza_liczba * druga_liczba

    elif działanie == 4:
        logging.info(f"Dividing {pierwsza_liczba} by {druga_liczba}")
        wynik = pierwsza_liczba / druga_liczba

    else:
        logging.warning("no such option")
        return

    print(f"Your result is: {wynik}")
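For instance, operation 3 multiplies the two arguments:

kalkulator(3, 6, 7)
# logs: Multiplying 6 by 7
# prints: Your result is: 42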
Example n. 28
 def traiterTableau(self, tableau, url, i):
     logging.debug("Starting to process table no. " + str(i + 1))
     try:
         # Initialize the csv file.
         csvFileName = self.mkCSVFileName(url, i + 1)
         logging.debug("The file is created here: " + self.outputDirHtml + csvFileName)
         csvFile = open(self.outputDirHtml + csvFileName, 'w', newline='', encoding='utf-8')
         writer = csv.writer(csvFile, delimiter=';')
         largeurTableau = self.getLargeurTotaleTableau(tableau, 0)
         # Initialize the rowspan dictionary and the rows.
         dicoRowspans = {}
         lignes = tableau.find_all('tr')
         # Process the headers.
         nbHeaders = self.traiterHeaders(lignes, largeurTableau, writer)
         # Remove the headers from the rows.
         for j in range(nbHeaders):
             del lignes[0]
         for k in range(len(lignes)):
             logging.debug("Processing row " + str(k) + ".")
             dicoRowspans = self.traiterLigne(dicoRowspans, lignes[k], largeurTableau, writer)
         logging.debug("Finished processing table no. " + str(i + 1))
     except Exception:
         logging.warning("This table could not be processed. Processing will continue.")
Example n. 29
def compute_learning_progress(data_files,
                              steps=10,
                              degree=2,
                              limits=None,
                              normalize=(),
                              ridge=False):
    """Trains a polynomial with n/steps fraction increments.  Returns error."""

    data = load_data(data_files)
    N = len(data)
    logging.info(" >> Loaded %d data points" % N)

    progress = []
    step_size = 1 / (steps + 1)
    for i in range(steps):
        fraction = (i + 1) * step_size
        data = load_data(data_files)
        data = preprocess_data(data, limits=limits, normalize=normalize)
        logging.info(
            " >> Using %d / %d data points after preprocessing (deleted %d points)"
            % (len(data), N, N - len(data)))

        data = extract_data_set(data)
        data[0] = polynomialize_data(data[0])
        data, training_data, test_data = split_data_set(
            data, training_fraction=fraction)

        if not (data and training_data and test_data):
            logging.warning(" >> Did not have enough data to do regression")
            continue

        reg_mod = linear_regression(*training_data, ridge=ridge)

        reg_res = do_evaluate(training_data, test_data, reg_mod, degree=degree)
        progress.append(reg_res)
    return progress
Example n. 30
 async def remove(self):
     args = [self.getValue(self.__primary_key__)]
     rows = await execute(self.__delete__, args)
     if rows != 1:
         logging.warning('failed to remove by primary key: affected rows: %s' % rows)