def get_xfrs_token(page_html):
    """
    Method to parse a D2L page to find the XSRF.Token. The token is returned as a string
    :param page_html:
    :return:
    """
    soup = BeautifulSoup(page_html, "html.parser")
    # TODO Loop over all of them, as the location might change
    xsrf = str(soup.findAll("script")[0]).splitlines()
    token = None

    for line in xsrf:
        if "XSRF.Token" in line:  #
            line_soup = re.findall("'(.*?)'", line)
            # We can also find our User.ID in this line as well
            for i in range(0, len(line_soup)):
                if line_soup[i] == 'XSRF.Token':
                    token = line_soup[i + 1]
                    break

    if token is None:
        logger.critical("Cannot find XSRF.Token. Code might have changed")
        exit(1)
    logger.debug("Found XSRF.Token. It's {}".format(token))

    return token
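
A minimal sketch of how the returned token might be used in follow-up requests. The base URL, API route, and the X-Csrf-Token header name are assumptions, not taken from the source:

import requests

session = requests.Session()
landing = session.get("https://example.brightspace.com/d2l/home")  # hypothetical D2L page
token = get_xfrs_token(landing.text)
# Attach the token to later API calls; the exact header name the server
# expects is an assumption here.
api_response = session.get(
    "https://example.brightspace.com/d2l/api/lp/1.0/users/whoami",
    headers={"X-Csrf-Token": token},
)
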
def download(rqs, furl, path, level=3):
    if not furl.startswith('http'):
        furl = "{}/{}".format(D2L_BASEURL, furl)

    file = rqs.get(furl, stream=True, headers={
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:64.0) Gecko/20100101 Firefox/64.0"
    })

    if file.status_code == 302:  # D2L redirects instead of returning a proper 404/403 error.
        logger.error("Requested file is Not Found or Forbidden")

    if not os.path.isdir(safeFilePath(path)):
        # logger.info("Directory does not exist.")
        logger.debug(safeFilePath(path))
        mkdir_recursive(safeFilePath(path))

    try:
        name = furl.split('?')[0].split('/')[-1]

        if name == "DirectFileTopicDownload":
            name = file.headers['Content-Disposition'].split(';')[-1].split('=')[-1][1:-1]

        path += "/" + safeFilePath(name)
        with open(unquote(path), 'wb') as f:
            for chunk in tqdm.tqdm(file.iter_content(chunk_size=1024), desc="Downloading {}".format(name),
                                   position=level, unit="KB"):  # each chunk is 1024 bytes, so progress is in KB

                if chunk:  # filter out keep-alive new chunks
                    f.write(chunk)
                    f.flush()
    except Exception as e:
        logger.exception("Exception caught during file download. {}", str(e))
Example #3
 def unlock_reruns(self):
     """Allow modification of max_reruns property"""
     logger.debug('Releasing rerun lock')
     self._reruns_locked = False
Example #4
    def on_task_input(self, task, config):
        # Create entries by parsing AniDB wishlist page html using beautifulsoup
        logger.verbose('Retrieving AniDB list: mywishlist:{}', config['mode'])

        task_headers = task.requests.headers.copy()
        task_headers['User-Agent'] = self.default_user_agent

        try:
            page = task.requests.get(
                self.anidb_url + 'animedb.pl',
                params=self._build_url_params(config),
                headers=task_headers,
            )
        except RequestException as e:
            raise plugin.PluginError(str(e))
        if page.status_code != 200:
            raise plugin.PluginError(
                'Unable to get AniDB list. Either the list is private or does not exist.'
            )

        entry_type = ''

        if config['type'] == 'movies':
            entry_type = 'Type: Movie'
        elif config['type'] == 'shows':
            entry_type = 'Type: TV Series'
        elif config['type'] == 'ovas':
            entry_type = 'Type: OVA'

        while True:
            soup = get_soup(page.text)
            soup_table = soup.find('table', class_='wishlist').find('tbody')

            trs = soup_table.find_all('tr')
            if not trs:
                logger.verbose(
                    'No movies were found in AniDB list: mywishlist')
                return
            for tr in trs:
                if tr.find('span', title=entry_type):
                    a = tr.find('td', class_='name').find('a')
                    if not a:
                        logger.debug(
                            'No title link found for the row, skipping')
                        continue

                    anime_title = a.string
                    if config.get('strip_dates'):
                        # Remove year from end of series name if present
                        anime_title = re.sub(r'\s+\(\d{4}\)$', '', anime_title)

                    entry = Entry()
                    entry['title'] = anime_title
                    entry['url'] = self.anidb_url + a.get('href')
                    entry['anidb_id'] = tr['id'][
                        1:]  # The <tr> tag's id is "aN..." where "N..." is the anime id
                    logger.debug('{} id is {}', entry['title'],
                                 entry['anidb_id'])
                    entry['anidb_name'] = entry['title']
                    yield entry
                else:
                    logger.verbose('Entry does not match the requested type')
            try:
                # Try to get the link to the next page.
                next_link = soup.find('li', class_='next').find('a')['href']
            except TypeError:
                # If it isn't there, there are no more pages to be crawled.
                logger.verbose('No more pages on the wishlist.')
                break
            comp_link = self.anidb_url + next_link
            logger.debug('Requesting: {}', comp_link)
            try:
                page = task.requests.get(comp_link, headers=task_headers)
            except RequestException as e:
                logger.error(str(e))
                break
            if page.status_code != 200:
                logger.warning('Unable to retrieve next page of wishlist.')
                break
Example #5
def test_remove_simple(writer):
    i = logger.add(writer, format="{message}")
    logger.debug("1")
    logger.remove(i)
    logger.debug("2")
    assert writer.read() == "1\n"
Example #6
 def disconnect(self):
     logger.debug("Disconnecting")
     if self.ble_device is not None:
         self.ble_device.disconnect()
         self.ble_device = None
         self.__pin_already_sent = False
Example #7
 def username(self) -> str:
     username = self.specificity_credentials.get("USERNAME")
     if not username:
         username = self.credentials["USERNAME"]
         logger.debug("未能找到 {} 对应的账号,使用通用账号。", self.AUTH_NAME)
     return username
def main(args=None):
    """
    Runs the .nfo generation.
    Use -h/--help to see all options.

    :param args: the command-line arguments to use, uses sys.argv if None
    :type args: list
    """

    parser = argparse.ArgumentParser(
        description=
        'Generates Kodi .nfo files with information retrieved from IMDB using local files with the unique IMDB movie ID.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="kodi-nfo-gen")
    parser.add_argument("--dir",
                        metavar="DIR",
                        dest="dir",
                        required=True,
                        help="the directory to traverse")
    parser.add_argument(
        "--type",
        dest="type",
        choices=["imdb"],
        default="imdb",
        required=False,
        help=
        "what type of ID the movie ID files represent, ie the website they are from"
    )
    parser.add_argument("--recursive",
                        action="store_true",
                        dest="recursive",
                        required=False,
                        help="whether to traverse the directory recursively")
    parser.add_argument(
        "--pattern",
        metavar="GLOB",
        dest="pattern",
        required=False,
        default="*.imdb",
        help="the pattern for the files that contain the movie IDs")
    parser.add_argument(
        "--delay",
        metavar="SECONDS",
        dest="delay",
        type=int,
        required=False,
        default=1,
        help="the delay in seconds between web queries (to avoid blacklisting)"
    )
    parser.add_argument(
        "--preferred_language",
        metavar="LANG",
        dest="language",
        required=False,
        default="en",
        help=
        "the preferred language for the titles (ISO 639-1, see https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes)"
    )
    parser.add_argument("--fanart",
                        dest="fanart",
                        choices=["none", "download", "use-existing"],
                        default="none",
                        required=False,
                        help="how to deal with fan-art")
    parser.add_argument(
        "--fanart_file",
        metavar="FILE",
        dest="fanart_file",
        default="folder.jpg",
        required=False,
        help="when downloading or using existing fanart, use this filename")
    parser.add_argument(
        "--dry_run",
        action="store_true",
        dest="dry_run",
        required=False,
        help=
        "whether to perform a 'dry-run', ie only outputting the .nfo content to stdout but not saving it to files"
    )
    parser.add_argument(
        "--overwrite",
        action="store_true",
        dest="overwrite",
        required=False,
        help=
        "whether to overwrite existing .nfo files, ie recreating them with freshly retrieved data"
    )
    parser.add_argument("--verbose",
                        action="store_true",
                        dest="verbose",
                        required=False,
                        help="whether to output logging information")
    parser.add_argument("--debug",
                        action="store_true",
                        dest="debug",
                        required=False,
                        help="whether to output debugging information")
    parser.add_argument("--interactive",
                        action="store_true",
                        dest="interactive",
                        required=False,
                        help="for enabling interactive mode")
    parsed = parser.parse_args(args=args)
    # interactive mode turns on verbose mode
    if parsed.interactive and not (parsed.verbose or parsed.debug):
        parsed.verbose = True
    # configure logging
    if parsed.debug:
        logging.basicConfig(level=logging.DEBUG)
    elif parsed.verbose:
        logging.basicConfig(level=logging.INFO)
    logger.debug(parsed)
    if parsed.interactive:
        logger.info("Entering interactive mode")
    generate(dir_=parsed.dir,
             idtype=parsed.type,
             recursive=parsed.recursive,
             pattern=parsed.pattern,
             dry_run=parsed.dry_run,
             overwrite=parsed.overwrite,
             language=parsed.language,
             fanart=parsed.fanart,
             fanart_file=parsed.fanart_file,
             interactive=parsed.interactive)
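
Since main() accepts an explicit argument list, the parser above can also be exercised from Python rather than the shell. The directory path below is a placeholder; every flag used is one defined above:

# Dry-run over a movie collection, traversing subdirectories, with verbose logging.
main([
    "--dir", "/data/movies",   # placeholder path
    "--recursive",
    "--pattern", "*.imdb",
    "--dry_run",
    "--verbose",
])
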
Example #9
async def image_converter(
        current_user: models.Client = Depends(oauth2.get_current_user),
        reference_id: str = Form(
            ..., description="=6 character alphanumeric value"),
        company_name: str = Form(...,
                                 description="<30 character alphabetic value"),
        resize_width: Optional[int] = Form(None,
                                           description="image resize width"),
        resize_height: Optional[int] = Form(None,
                                            description="image resize height"),
        image_format: Optional[str] = Form(
            ..., description="any of [jpg, jpeg, png, tiff, tif, webp]"),
        quality_check: bool = Form(True, description="image quality check"),
        image_file: UploadFile = File(..., description="image file"),
):

    logger.debug({
        "reference_id": reference_id,
        "company_name": company_name,
        "resize_width": resize_width,
        "resize_height": resize_height,
        "image_format": image_format,
        "quality_check": quality_check,
    })

    process_start_time = time.time()

    try:
        # remove this to use marshmallow validation
        # input_data_dict = sch.ImageSchema().load(
        #     {"reference_id": reference_id,
        #      "resize_width": resize_width,
        #      "resize_height": resize_height,
        #      "company_name": company_name,
        #      "quality_check": quality_check,
        #      "image_format": image_format,
        #      "image_file": image_file
        #      })
        #
        # input_data = utils.dict2obj(input_data_dict)

        # remove comment to use pydantic validation

        image_path = await utils.save_file_aiof(image_file)

        input_data = sch.ImageSchema(
            reference_id=reference_id,
            resize_width=resize_width,
            resize_height=resize_height,
            company_name=company_name,
            quality_check=quality_check,
            image_format=image_format,
            image_file=image_path,
        )

    except sch.ValidationError as e:
        raise utils.ValidationException(e)

    image = Image.fromarray(input_data.image_file)

    if resize_width is not None:
        width_percent = input_data.resize_width / float(image.size[0])
        image_height = int(float(image.size[1]) * float(width_percent))
        image = image.resize((input_data.resize_width, image_height),
                             Image.ANTIALIAS)
    elif resize_height is not None:
        height_percent = input_data.resize_height / float(image.size[1])
        image_width = int(float(image.size[0]) * float(height_percent))
        image = image.resize((image_width, input_data.resize_height),
                             Image.ANTIALIAS)

    buf = io.BytesIO()

    image.save(buf, format="JPEG")
    base64_string = base64.b64encode(buf.getvalue()).decode()
    time_stamp = datetime.now().strftime("%Y-%m-%dT%H:%M:%S")

    data = {
        "base64_string": base64_string,
        "reference_id": input_data.reference_id,
        "time_stamp": time_stamp,
        "process_time": (time.time() - process_start_time),
    }

    return JSONResponse({"data": data, "status": "success"}, status_code=200)
Example #10
from loguru import logger
import pandas as pd
import tcost_gui

logger.debug('Transferring info from UI...')
transfer = tcost_gui.main_dict
logger.debug('Transferring info from UI..SUCCESS')
logger.info(transfer)

amount_of_receivers = set()
cumul_of_rec = {}

# collect the set of active delivery groups
logger.debug('set of active delivery groups forming --->')
for k, v in transfer.items():
    amount_of_receivers.add(v[0])
logger.info(amount_of_receivers)

# build a dict keyed by delivery group number
logger.debug('dict of active delivery groups as keys forming --->')
for i in amount_of_receivers:
    cumul_of_rec[str(i)] = 0
logger.info(cumul_of_rec)

# for each delivery group, sum the number of boxes to deliver
for i in amount_of_receivers:
    for k, v in transfer.items():
        if v[0] == i:
            cumul_of_rec[str(i)] += v[2]

result_of_module = {}
Example #11
    def wrapper(*args, **kwargs):
        logger.debug(f'Entering {f}')
        result = f(*args, **kwargs)
        logger.debug(f'Exiting {f}')

        return result
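
The snippet above is only the inner wrapper; a complete, self-contained version of such a tracing decorator might look like this (the decorator name log_calls is an assumption):

import functools
from loguru import logger

def log_calls(f):
    """Log entry and exit of the wrapped callable."""
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        logger.debug(f'Entering {f.__name__}')
        result = f(*args, **kwargs)
        logger.debug(f'Exiting {f.__name__}')
        return result
    return wrapper

@log_calls
def add(a, b):
    return a + b
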
Example #12
 def __enter__(self):
     bootstrap()
     logger.debug('Start recording invocations')
Example #13
 def __init__(self):
     logger.debug('creating instance of profiler')
     self.invocations = []
Example #14
 def shutdown(self, topic="", message=""):
     logger.info(f"Shutting down mqtt client {self.name}")
     self.client.loop_stop()
     logger.debug(f"Mqtt client {self.name} shut down")
Example #15
 def read_message(self):
     logger.debug("clearing the __new_message flag")
     self.__new_message = False
Example #16
async def create_group_user(
    *,
    group: GroupUser,
    delay: int = Query(
        None,
        title=title,
        description=delay_description,
        ge=1,
        le=121,
        alias="delay",
    ),
) -> dict:
    """[summary]
    Add a user to a group
    Args:
        group (GroupUser): [description]
        delay (int, optional): [description]. Defaults to Query( None,
         title=title, description="Seconds to delay (max 121)", ge=1, \
             le=121, alias="delay", ).

    Returns:
        dict: [description]
        Confirmation of user being added
    """
    # sleep if delay option is used
    if delay is not None:
        logger.info(f"adding a delay of {delay} seconds")
        await asyncio.sleep(delay)

    check_id = str(group.group_id)
    group_id_exists = await check_id_exists(id=check_id)

    if group_id_exists is False:
        error: dict = {"error": f"Group ID '{check_id}' does not exist"}
        logger.warning(error)
        return JSONResponse(status_code=404, content=error)

    check_user = str(group.user)
    exist_user = await check_user_exists(user=check_user, group_id=check_id)

    if exist_user is True:
        error: dict = {"error": f"User ID '{check_id}' already in group"}
        logger.warning(error)
        return JSONResponse(status_code=400, content=error)

    try:

        user_id = str(uuid.uuid4())
        group_data = {
            "id": user_id,
            "user": group.user,
            "group_id": group.group_id
        }
        logger.debug(group_data)
        # create group
        query = groups_item.insert()
        group_result = await execute_one_db(query=query, values=group_data)
        logger.debug(str(group_result))

        full_result: dict = group_data
        logger.debug(full_result)
        return JSONResponse(status_code=status.HTTP_201_CREATED,
                            content=full_result)
    except Exception as e:
        error: dict = {"error": str(e)}
        logger.debug(e)
        logger.critical(f"Critical Error: {e}")
        return JSONResponse(status_code=400, content=error)
Example #17
    def remove(self,
               downloads: List[Download],
               force: bool = False,
               files: bool = False,
               clean: bool = True) -> List[bool]:
        """
        Remove the given downloads from the list.

        Parameters:
            downloads: the list of downloads to remove.
            force: whether to force the removal or not.
            files: whether to remove downloads files as well.
            clean: whether to remove the aria2 control file as well.

        Returns:
            Success or failure of the operation for each given download.
        """
        # TODO: batch/multicall candidate
        if force:
            remove_func = self.client.force_remove
        else:
            remove_func = self.client.remove

        result = []

        for download in downloads:
            if download.is_complete or download.is_removed or download.has_failed:
                logger.debug(f"Try to remove download result {download.gid}")
                try:
                    self.client.remove_download_result(download.gid)
                except ClientException as error:
                    logger.exception(error)
                    result.append(error)
                else:
                    logger.success(f"Removed download result {download.gid}")
                    result.append(True)
            else:
                logger.debug(f"Try to remove download {download.gid}")
                try:
                    removed_gid = remove_func(download.gid)
                except ClientException as error:
                    logger.exception(error)
                    result.append(error)
                else:
                    logger.success(f"Removed download {download.gid}")
                    result.append(True)
                    try:
                        self.client.remove_download_result(download.gid)
                    except ClientException as error2:
                        logger.debug(
                            f"Failed to remove download result {download.gid}")
                        logger.opt(exception=True).trace(error2)
                    if removed_gid != download.gid:
                        logger.debug(
                            f"Removed download GID#{removed_gid} is different than download GID#{download.gid}"
                        )
                        try:
                            self.client.remove_download_result(removed_gid)
                        except ClientException as error2:
                            logger.debug(
                                f"Failed to remove download result {removed_gid}"
                            )
                            logger.opt(exception=True).trace(error2)

            if clean:
                # FUTURE: use missing_ok parameter on Python 3.8
                try:
                    download.control_file_path.unlink()
                except FileNotFoundError:
                    logger.debug(
                        f"aria2 control file {download.control_file_path} was not found"
                    )
                else:
                    logger.debug(
                        f"Removed control file {download.control_file_path}")

            if files and result[-1]:
                self.remove_files([download], force=True)

        return result
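
This method reads like part of aria2p's API wrapper; a hedged usage sketch, assuming that library and a local aria2c RPC server on the default port:

import aria2p

# Assumes aria2c is running locally with --enable-rpc on the default port.
aria2 = aria2p.API(aria2p.Client(host="http://localhost", port=6800, secret=""))
failed = [d for d in aria2.get_downloads() if d.has_failed]
# Force-remove the failed downloads, deleting their files and aria2 control files.
results = aria2.remove(failed, force=True, files=True, clean=True)
print(results)
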
Example #18
def multiregion(event):
    """ shows information for a given user's workspace

    need to pass username in the event
    """
    logger.debug("workspaces.info.multiregion() starting")

    configuration = event.get('configuration', {})

    user_workspaces = []
    for region in configuration.get('regions'):
        # need to get the directoryid for each region
        if not configuration.get('directorymap', {}).get(region):
            logger.error(f"Couldn't find directoryid for region '{region}' in workspaces.info.multiregion()")
            continue

        try:
            session = Session(
                region_name=region
            )
        except Exception as ERROR: # pylint: disable=broad-except,invalid-name
            logger.error(f"Failed to instantiate session with region {region}: {ERROR}")
            return False

        try:
            client = session.client('workspaces')
            findworkspace = client.describe_workspaces(
                DirectoryId=configuration.get('directorymap', {}).get(region),
                UserName=event.get('username'),
            )
            if findworkspace.get('Workspaces'):
                for workspace in findworkspace.get('Workspaces'):
                    wsid = workspace.get('WorkspaceId')
                    state = workspace.get('State')
                    bundlename = get_bundle_name(workspace.get('BundleId'))
                    user_workspaces.append(f"{wsid} in state {state} (Region: {region} Bundle: {bundlename})") #pylint: disable=line-too-long

        except Exception as e: # pylint: disable=broad-except,invalid-name
            logger.error(f"ERROR: {e}")
            return False

    # message the user with the results
    slackclient = WebClient(token=configuration.get('slacktoken'))
    logger.debug("Messaging user to advise...")

    if not user_workspaces:
        slackclient.chat_postEphemeral(
            channel=configuration.get('channel_id'),
            user=configuration.get('user_id'),
            text=f"No workspaces found for username {event.get('username')}"
        )
    else:
        blocks = [
            {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": f"Workspace information for user {event.get('username')}"
                }
            },
        ]
        for workspace in user_workspaces:
            blocks.append({
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": f"• {workspace}"
                }
                })
        logger.debug(slackclient.chat_postEphemeral(
            channel=configuration.get('channel_id'),
            user=configuration.get('user_id'),
            blocks=blocks,
        ))
    return True
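
get_bundle_name() is referenced above but not shown; one possible implementation using the same boto3 WorkSpaces client (the default region and the fallback behaviour are assumptions):

from boto3.session import Session  # assumption: matches the Session used above

def get_bundle_name(bundle_id, region="us-east-1"):
    """Resolve a WorkSpaces bundle id to its human-readable name."""
    # Hypothetical helper; falls back to the raw id if the bundle is not found.
    client = Session(region_name=region).client('workspaces')
    response = client.describe_workspace_bundles(BundleIds=[bundle_id])
    bundles = response.get('Bundles', [])
    return bundles[0].get('Name', bundle_id) if bundles else bundle_id
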
Example #19
def test_edit_existing_level(writer):
    logger.level("DEBUG", no=20, icon="!")
    fmt = "{level.no}, <level>{level.name}</level>, {level.icon}, {message}"
    logger.add(writer, format=fmt, colorize=False)
    logger.debug("a")
    assert writer.read() == "20, DEBUG, !, a\n"
Example #20
def main(args):
    root_cfg = cfg
    root_cfg.merge_from_file(args.config)
    logger.info("Load experiment configuration at: %s" % args.config)

    # resolve config
    root_cfg = complete_path_wt_root_in_cfg(root_cfg, ROOT_PATH)
    root_cfg = root_cfg.test
    task, task_cfg = specify_task(root_cfg)
    task_cfg.freeze()
    window_name = task_cfg.exp_name
    # build model
    model = model_builder.build(task, task_cfg.model)
    # build pipeline
    pipeline = pipeline_builder.build(task, task_cfg.pipeline, model)
    dev = torch.device(args.device)
    pipeline.set_device(dev)
    init_box = None
    template = None
    if len(args.init_bbox) == 4:
        init_box = args.init_bbox

    vw = None
    resize_ratio = args.resize
    dump_only = args.dump_only

    # create video stream
    if args.video == "webcam":
        logger.info("Starting video stream...")
        vs = cv2.VideoCapture(0)
        vs.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'))
    elif not osp.isfile(args.video):
        logger.info("Starting from video frame image files...")
        vs = ImageFileVideoStream(args.video, init_counter=args.start_index)
    else:
        logger.info("Starting from video file...")
        vs = cv2.VideoCapture(args.video)

    # create video writer to output video
    if args.output:
        if osp.isdir(args.output):
            vw = ImageFileVideoWriter(args.output)
        else:
            fourcc = cv2.VideoWriter_fourcc(*'MJPG')
            width, height = vs.get(3), vs.get(4)
            vw = cv2.VideoWriter(
                args.output, fourcc, 25,
                (int(width * resize_ratio), int(height * resize_ratio)))

    # loop over sequence
    while vs.isOpened():
        key = 255
        ret, frame = vs.read()
        logger.debug("frame: {}".format(ret))
        if ret:
            if template is not None:
                time_a = time.time()
                rect_pred = pipeline.update(frame)
                logger.debug(rect_pred)
                show_frame = frame.copy()
                time_cost = time.time() - time_a
                bbox_pred = xywh2xyxy(rect_pred)
                bbox_pred = tuple(map(int, bbox_pred))
                cv2.putText(show_frame,
                            "track cost: {:.4f} s".format(time_cost),
                            (128, 20), cv2.FONT_HERSHEY_COMPLEX, font_size,
                            (0, 0, 255), font_width)
                cv2.rectangle(show_frame, bbox_pred[:2], bbox_pred[2:],
                              (0, 255, 0))
                if template is not None:
                    show_frame[:128, :128] = template
            else:
                show_frame = frame
            show_frame = cv2.resize(
                show_frame,
                (int(show_frame.shape[1] * resize_ratio),
                 int(show_frame.shape[0] * resize_ratio)))  # resize
            if not dump_only:
                cv2.imshow(window_name, show_frame)
            if vw is not None:
                vw.write(show_frame)
        # catch key input when running interactively
        if (init_box is None) or (vw is None):
            logger.debug("Press key s to select object.")
            key = cv2.waitKey(30) & 0xFF
        logger.debug("key: {}".format(key))
        if key == ord("q"):
            break
        # if the 's' key is selected, we are going to "select" a bounding
        # box to track
        elif key == ord("s"):
            # select the bounding box of the object we want to track (make
            # sure you press ENTER or SPACE after selecting the ROI)
            logger.debug("Select object to track")
            box = cv2.selectROI(window_name,
                                frame,
                                fromCenter=False,
                                showCrosshair=True)
            if box[2] > 0 and box[3] > 0:
                init_box = box
        elif key == ord("c"):
            logger.debug(
                "init_box/template released, press key s to select object.")
            init_box = None
            template = None
        if (init_box is not None) and (template is None):
            template = cv2.resize(
                frame[int(init_box[1]):int(init_box[1] + init_box[3]),
                      int(init_box[0]):int(init_box[0] + init_box[2])],
                (128, 128))
            pipeline.init(frame, init_box)
            logger.debug(
                "pipeline initialized with bbox : {}".format(init_box))
    vs.release()
    if vw is not None:
        vw.release()
    cv2.destroyAllWindows()
Example #21
 def cancel(self):
     """Send exit signal to thread"""
     logger.debug("Delayed call got cancellation signal")
     self._exit_early = True
 def _add_proxy_url(self):
     urls = []
     with open("proxy_url.txt", "r") as f:
         for line in f.readlines():
             urls.append(line[:-1])
     logger.info(f"number of proxy url {len(urls)}")
     for url in urls:
         logger.debug(f"update {url}")
         try:
             if len(self.ip_q["https"]) > 10:
                 proxies = {
                     "https":
                     self._get_good_proxy("https", change_priority=False)
                 }
             else:
                 proxies = None
             resp = requests.get(f"https://{url}",
                                 timeout=15,
                                 proxies=proxies)
             regs = re.findall(r'((?:\d{1,3}\.){3}\d{1,3}):(\d+)',
                               resp.text)
             for protocol in self.PROTOCOLS:
                 for reg in regs:
                     self.add_ip(protocol, reg[0] + ":" + reg[1])
             logger.debug(f"done url {url}")
         except Exception:
             try:
                 if len(self.ip_q["http"]) > 10:
                     proxies = {
                         "http":
                         self._get_good_proxy("http", change_priority=False)
                     }
                 else:
                     proxies = None
                 resp = requests.get(f"http://{url}",
                                     timeout=15,
                                     proxies=proxies)
                 regs = re.findall(r'((?:\d{1,3}\.){3}\d{1,3}):(\d+)',
                                   resp.text)
                 for protocol in self.PROTOCOLS:
                     for reg in regs:
                         self.add_ip(protocol, reg[0] + ":" + reg[1])
                 logger.debug(f"done url {url}")
             except Exception:
                 try:
                     resp = requests.get(f"https://{url}", timeout=15)
                     regs = re.findall(r'((?:\d{1,3}\.){3}\d{1,3}):(\d+)',
                                       resp.text)
                     for protocol in self.PROTOCOLS:
                         for reg in regs:
                             self.add_ip(protocol, reg[0] + ":" + reg[1])
                     logger.debug(f"done url {url}")
                 except Exception:
                     logger.info(f"bad url https://{url}")
                     try:
                         resp = requests.get(f"http://{url}", timeout=15)
                         regs = re.findall(
                             r'((?:\d{1,3}\.){3}\d{1,3}):(\d+)', resp.text)
                         for protocol in self.PROTOCOLS:
                             for reg in regs:
                                 self.add_ip(protocol,
                                             reg[0] + ":" + reg[1])
                         logger.debug(f"done url {url}")
                     except Exception:
                         logger.info(f"bad url http://{url}")
Example #23
    def _post_install(self):
        logger.debug("Collecting tmproot files timestamps")
        tmproot_timestamps = self._collect_times()

        logger.debug("Dropping absolute paths from pkg-config")
        self._drop_absolute_pkgconfig_paths()

        logger.debug("Purging libtools' files")
        self._purge_libtools_files()

        # TODO: maybe this should be put into the configuration and not in orchestra itself
        logger.debug("Converting hardlinks to symbolic")
        self._hard_to_symbolic()

        # TODO: maybe this should be put into the configuration and not in orchestra itself
        logger.debug("Fixing RPATHs")
        self._fix_rpath()

        # TODO: this should be put into the configuration and not in orchestra itself
        logger.debug("Replacing NDEBUG preprocessor statements")
        self._replace_ndebug(self.build.ndebug)

        # TODO: this should be put into the configuration and not in orchestra itself
        logger.debug("Replacing ASAN preprocessor statements")
        self._replace_asan(self.build.asan)

        logger.debug("Restoring tmproot files timestamps")
        self._restore_mtimes(tmproot_timestamps)

        if self.build.component.license:
            logger.debug("Copying license file")
            source = self.build.component.license
            destination = installed_component_license_path(self.build.component.name, self.config)
            script = dedent(
                f"""
                DESTINATION_DIR="$TMP_ROOT$(dirname "{destination}")"
                mkdir -p "$DESTINATION_DIR"
                for DIR in "$BUILD_DIR" "$SOURCE_DIR"; do
                  if test -e "$DIR/{source}"; then
                    cp "$DIR/{source}" "$TMP_ROOT/{destination}"
                    exit 0
                  fi
                done
                echo "Couldn't find {source}"
                exit 1
                """
            )
            self._run_internal_script(script)
Example #24
 def send_pin(self):
     if not self.__pin_already_sent:
         logger.debug("Write PIN to {}", self.address)
         pin_handler = 0x24
         self.ble_device.writeCharacteristic(pin_handler, self.pin, True)
         self.__pin_already_sent = True
Example #25
 def _save_hash_material(self):
     logger.debug("Saving hash material")
     hash_material_path = Path(self._hash_material_path())
     hash_material_path.write_text(self.component.recursive_hash_material())
Example #26
def test_remove_enqueue_filesink(tmpdir):
    file = tmpdir.join("test.log")
    i = logger.add(str(file), format="{message}", enqueue=True)
    logger.debug("1")
    logger.remove(i)
    assert file.read() == "1\n"
Example #27
 def debug(self, msg):
     logger.debug(msg)
Example #28
 def lock_reruns(self):
     """Prevent modification of max_reruns property"""
     logger.debug('Enabling rerun lock')
     self._reruns_locked = True
Example #29
async def handle_task(
    websocket: WebSocket,
    db: ITaskflowDb = Depends(di.db),
    scheduler: TaskScheduler = Depends(di.scheduler),
):
    """
    Websocket endpoint for starting a task
    """

    task = None
    try:
        await websocket.accept()

        # The client sends the NewTask object
        data = await websocket.receive_json()
        new_task = NewTask.parse_obj(data)

        task_ = new_task.to_task()
        await db.insert_task(task_)
        task = task_

        # Sends the resolved task
        await websocket.send_text(task.json())

        # Waits for execution and sends updates periodically
        while True:
            pending_tasks_count = await db.get_pending_tasks_count()
            running_tasks_count = await db.get_running_tasks_count()
            message = SocketMessage(
                type=MessageType.INFO_UPDATE,
                data=ClientUpdateInfo(
                    pending_tasks_count=pending_tasks_count,
                    running_tasks_count=running_tasks_count,
                ),
            )
            await websocket.send_text(message.json())

            # Check if the task can be executed
            can_start = await scheduler.wait_for_task_execution(task)
            if can_start:
                message = SocketMessage(type=MessageType.TASK_CAN_START)
                await websocket.send_text(message.json())

                task.is_running = True
                task.started_at = get_timestamp_ms()
                await db.update_task(task)

                break

        # Wait for task finish
        can_finish = False
        while not can_finish:
            data = await websocket.receive_json()
            try:
                message = SocketMessage.parse_obj(data)
                if message.type == MessageType.TASK_FINISH:
                    can_finish = True
                elif message.type == MessageType.TASK_UPDATE:
                    task = Task.parse_obj(message.data)
                    logger.debug(task.json())
                    await db.update_task(task)
            except ValueError:
                logger.warning("Invalid JSON data")

        await websocket.close()
    except (WebSocketDisconnect, ConnectionClosed):
        logger.warning("Socket disconnected")
    except Exception as e:
        logger.exception("", e)
        await websocket.close()

    # Cleanup
    if task is not None:
        logger.info(f"Task {task.id} finished")
        await db.delete_task(task)

    del task
        json_blob = json.loads(sanitized)

        # json_blob['Payload']['Html'] contains the fully rendered HTML with everything we need.
        # Parse it with BeautifulSoup so it can be read programmatically.
        page_soup = BeautifulSoup(json_blob['Payload']['Html'], "html.parser")

        # Now we have a table we can parse for all our courses. Its columns are
        #    | Course Name | Course Code | Semester | Standard Department | Start | End |
        # and all we need to do is find every `tr` (table row).
        courses = page_soup.find_all('tr')
        parsed_courses = []
        for course in courses:
            _course_soup = course.find_all('td')
            if len(_course_soup) < 5:
                logger.debug("Ignoring course row due to less than expected data length")
                continue

            metadata = {}
            # Find the name and ID. D2L puts the first column in a <th>, so we need to keep this in mind.
            name = course.find('th').find('a')
            metadata['name'] = name.text.strip().replace("/", "_")
            metadata['id'] = name.get('href').split('/')[-1]
            metadata['course_code'] = _course_soup[0].find('div').text.strip()
            metadata['semester'] = _course_soup[1].find('div').text.strip()
            metadata['dept'] = _course_soup[2].find('div').text.strip()
            metadata['start'] = _course_soup[3].find('div').text.strip()
            metadata['end'] = _course_soup[4].find('div').text.strip()

            parsed_courses.append(metadata)
def train_data(train_loader, val_loader, train_set, val_set, config, device):

    # define the model, loss function, and optimizer
    model = Classifier().to(device)
    criterion = nn.CrossEntropyLoss() 
    optimizer = torch.optim.Adam(model.parameters(), lr=config['learning_rate'])

    num_epoch = config['num_epoch']
    model_path = config['model_path']
    # start training

    best_acc = 0.0
    for epoch in range(num_epoch):
        train_acc = 0.0
        train_loss = 0.0
        val_acc = 0.0
        val_loss = 0.0

        # training
        model.train() # set the model to training mode
        for i, data in enumerate(train_loader):
            inputs, labels = data
            inputs, labels = inputs.to(device), labels.to(device)

            optimizer.zero_grad() 
            outputs = model(inputs) 
            batch_loss = criterion(outputs, labels)
            _, train_pred = torch.max(outputs, 1) # get the index of the class with the highest probability
            batch_loss.backward() 
            optimizer.step() 

            train_acc += (train_pred.cpu() == labels.cpu()).sum().item()
            train_loss += batch_loss.item()

        # validation
        if len(val_set) > 0:
            model.eval() # set the model to evaluation mode
            with torch.no_grad():
                for i, data in enumerate(val_loader):
                    inputs, labels = data
                    inputs, labels = inputs.to(device), labels.to(device)
                    outputs = model(inputs)
                    batch_loss = criterion(outputs, labels) 
                    _, val_pred = torch.max(outputs, 1) 
                
                    val_acc += (val_pred.cpu() == labels.cpu()).sum().item()  # count correct predictions in this batch
                    val_loss += batch_loss.item()


                logger.debug('[{:03d}/{:03d}] Train Acc: {:3.6f} Loss: {:3.6f} | Val Acc: {:3.6f} loss: {:3.6f}'.format(
                    epoch + 1, num_epoch, train_acc/len(train_set), train_loss/len(train_loader), val_acc/len(val_set), val_loss/len(val_loader)
                ))
                # use tensorboard to visualize the accuracy and loss
                real_train_acc = train_acc/len(train_set)
                real_train_loss = train_loss/len(train_loader)
                real_val_acc = val_acc/len(val_set)
                real_val_loss = val_loss/len(val_loader)
                writer.add_scalar("Acc/Val", real_val_acc, epoch)
                writer.add_scalar("Loss/Val", real_val_loss, epoch)
                writer.add_scalar("Acc/train", real_train_acc, epoch)
                writer.add_scalar("Loss/train", real_train_loss, epoch)

                # if the model improves, save a checkpoint at this epoch
                if val_acc > best_acc:
                    best_acc = val_acc
                    torch.save(model.state_dict(), model_path)
                    logger.debug('saving model with acc {:.3f}'.format(best_acc/len(val_set)))
        else:
            logger.debug('[{:03d}/{:03d}] Train Acc: {:3.6f} Loss: {:3.6f}'.format(
                epoch + 1, num_epoch, train_acc/len(train_set), train_loss/len(train_loader)
            ))

    writer.flush()
    writer.close()
    # if not validating, save the last epoch
    if len(val_set) == 0:
        torch.save(model.state_dict(), model_path)
        logger.debug('saving model at last epoch')
def main():
    # get device 
    logger.debug('------------------------------------------------------------------------------------------------------')
    device = get_device()
    logger.debug(f'DEVICE: {device}')
    same_seeds(0)
    batch_size = config['batch_size']
    val_ratio = config['val_ratio']
    logger.debug('New train begins')
    logger.debug('Loading data ...')
    train_loader, val_loader, test_loader, test_set, train_set, val_set = prep_dataloader(batch_size, val_ratio)
    logger.debug('Loading data complete')
    train_data(train_loader, val_loader, train_set, val_set, config, device)
    predict_data(test_loader, device)
    logger.debug('All done')
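
get_device() and same_seeds() are referenced above but not shown; a common implementation, offered here as an assumption about what they do:

import random
import numpy as np
import torch

def get_device():
    # Prefer the GPU when one is available, otherwise fall back to the CPU.
    return 'cuda' if torch.cuda.is_available() else 'cpu'

def same_seeds(seed):
    # Seed every RNG the training loop touches so results are reproducible.
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
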
Example #33
async def group_id(
    group_id: str = Query(
        None,
        title="Group ID",
        description="Get by the Group UUID",
        alias="groupId",
    ),
    group_name: str = Query(
        None,
        title="Group Name",
        description="Get by the Group Name",
        alias="groupName",
    ),
    delay: int = Query(
        None,
        title=title,
        description=delay_description,
        ge=1,
        le=121,
        alias="delay",
    ),
) -> dict:
    """[summary]
    Get individual group data, including users
    Args:
        group_id (str, optional): [description]. Defaults to Query( None,
         title="Group ID", description="Get by the Group UUID", alias="groupId", ).
        group_name (str, optional): [description]. Defaults to Query( None,
         title="Group Name", description="Get by the Group Name", alias="groupName", ).
        delay (int, optional): [description]. Defaults to Query( None,
         title=title, description="Seconds to delay (max 121)", ge=1, le=121, \
             alias="delay", ).

    Returns:
        dict: [description]
        Group data and associated users
    """
    # sleep if delay option is used
    if delay is not None:
        await asyncio.sleep(delay)

    # if search by ID
    if group_id is not None:

        id_exists = await check_id_exists(group_id)
        if id_exists is False:
            error: dict = {"error": f"Group ID: '{group_id}' not found"}
            logger.warning(error)
            return JSONResponse(status_code=404, content=error)

    # elif search by name
    elif group_name is not None:

        name_exists = await check_unique_name(group_name)
        if name_exists is True:
            error: dict = {"error": f"Group Name: '{group_name}' not found"}
            logger.warning(error)
            return JSONResponse(status_code=404, content=error)

        query = groups.select().where(groups.c.name == group_name)
        name_result = await fetch_one_db(query=query)
        group_id = name_result["id"]
    # else at least one needs to be selected
    else:
        error: dict = {"error": "groupId or groupName must be used"}
        logger.warning(error)
        return JSONResponse(status_code=404, content=error)

    query = groups_item.select().where(groups_item.c.group_id == group_id)
    db_result = await fetch_all_db(query=query)

    users_list: list = []
    user_dict: list = []
    for r in db_result:
        logger.debug(r)
        user_data: dict = {
            "id": r["id"],
            "user": r["user"],
            "date_created": str(r["date_create"]),
        }
        user_dict.append(user_data)
        users_list.append(r["user"])
    result = {
        "group_id": group_id,
        "count": len(users_list),
        "users": users_list,
        "user_info": user_dict,
    }
    return JSONResponse(status_code=200, content=result)