def download(rqs, furl, path, level=3):
    if furl[0:4] != 'http':
        furl = "{}/{}".format(D2L_BASEURL, furl)

    file = rqs.get(furl, stream=True, headers={
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:64.0) Gecko/20100101 Firefox/64.0"
    })

    if file.status_code == 302:  # D2L redirects Not Found/Forbidden responses instead of returning 404/403 directly
        logger.error("Requested file is Not Found or Forbidden")

    if not os.path.isdir(safeFilePath(path)):
        # logger.info("Directory does not exist.")
        logger.debug(safeFilePath(path))
        mkdir_recursive(safeFilePath(path))

    try:
        name = furl.split('?')[0].split('/')[-1]

        if name == "DirectFileTopicDownload":
            name = file.headers['Content-Disposition'].split(';')[-1].split('=')[-1][1:-1]

        path += "/" + safeFilePath(name)
        with open(unquote(path), 'wb') as f:
            for chunk in tqdm.tqdm(file.iter_content(chunk_size=1024), desc="Downloading {}".format(name),
                                   position=level, unit="KiB"):  # chunk_size=1024 bytes, so each tick is ~1 KiB

                if chunk:  # filter out keep-alive new chunks
                    f.write(chunk)
                    f.flush()
    except Exception as e:
        logger.exception("Exception caught during file download. {}", str(e))
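
A note on the pattern above: requests with stream=True plus tqdm over iter_content is a generic streaming-download idiom. A minimal self-contained sketch of just that idiom, independent of the D2L helpers (the URL and output name would be placeholders):

import requests
import tqdm

def stream_to_file(url, out_path, chunk_size=1024):
    resp = requests.get(url, stream=True)
    resp.raise_for_status()
    with open(out_path, 'wb') as f:
        # Each yielded chunk is at most chunk_size bytes, so one tick is roughly 1 KiB here.
        for chunk in tqdm.tqdm(resp.iter_content(chunk_size=chunk_size), unit='KiB'):
            if chunk:  # skip keep-alive chunks
                f.write(chunk)
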
Example #2
    def on_task_output(self, task, config):
        config = self.prepare_config(config)
        # don't add when learning
        if task.options.learn:
            return
        if not config['enabled']:
            return
        # Do not run if there is nothing to do
        if not task.accepted:
            return
        if self.client is None:
            self.client = self.create_rpc_client(config)
            if self.client:
                logger.debug('Successfully connected to transmission.')
            else:
                raise plugin.PluginError("Couldn't connect to transmission.")
        session_torrents = self.client.get_torrents()
        for entry in task.accepted:
            if task.options.test:
                logger.info('Would {} {} in transmission.', config['action'],
                            entry['title'])
                continue
            # Compile user options into appropriate dict
            options = self._make_torrent_options_dict(config, entry)
            torrent_info = None
            for t in session_torrents:
                if t.hashString.lower() == entry.get(
                        'torrent_info_hash',
                        '').lower() or t.id == entry.get('transmission_id'):
                    torrent_info = t
                    logger.debug(
                        'Found {} already loaded in transmission as {}',
                        entry['title'],
                        torrent_info.name,
                    )
                    break

            if not torrent_info:
                if config['action'] != 'add':
                    logger.warning(
                        'Cannot {} {} because it is not loaded in transmission.',
                        config['action'],
                        entry['title'],
                    )
                    continue
                downloaded = not entry['url'].startswith('magnet:')

                # Check that file is downloaded
                if downloaded and 'file' not in entry:
                    entry.fail('`file` field missing?')
                    continue

                # Verify the temp file exists
                if downloaded and not os.path.exists(entry['file']):
                    tmp_path = os.path.join(task.manager.config_base, 'temp')
                    logger.debug('entry: {}', entry)
                    logger.debug('temp: {}', ', '.join(os.listdir(tmp_path)))
                    entry.fail("Downloaded temp file '%s' doesn't exist!?" %
                               entry['file'])
                    continue

                try:
                    if downloaded:
                        with open(entry['file'], 'rb') as f:
                            filedump = base64.b64encode(
                                f.read()).decode('utf-8')
                        torrent_info = self.client.add_torrent(
                            filedump, 30, **options['add'])
                    else:
                        if options['post'].get('magnetization_timeout', 0) > 0:
                            options['add']['paused'] = False
                        torrent_info = self.client.add_torrent(
                            entry['url'], timeout=30, **options['add'])
                except TransmissionError as e:
                    logger.opt(exception=True).debug('TransmissionError')
                    logger.debug('Failed options dict: {}', options['add'])
                    msg = 'Error adding {} to transmission. TransmissionError: {}'.format(
                        entry['title'], e.message or 'N/A')
                    logger.error(msg)
                    entry.fail(msg)
                    continue
                logger.info('"{}" torrent added to transmission',
                            entry['title'])
                # The info returned by the add call is incomplete, refresh it
                torrent_info = self.client.get_torrent(torrent_info.id)
            else:
                # Torrent already loaded in transmission
                if options['add'].get('download_dir'):
                    logger.verbose('Moving {} to "{}"', torrent_info.name,
                                   options['add']['download_dir'])
                    # Move data even if current reported torrent location matches new location
                    # as transmission may fail to automatically move completed file to final
                    # location but continue reporting final location instead of real location.
                    # In such case this will kick transmission to really move data.
                    # If data is already located at new location then transmission just ignore
                    # this command.
                    self.client.move_torrent_data(
                        torrent_info.id, options['add']['download_dir'], 120)

            try:
                total_size = torrent_info.totalSize
                main_id = None
                find_main_file = (options['post'].get('main_file_only')
                                  or 'content_filename' in options['post'])
                skip_files = options['post'].get('skip_files')
                # We need to index the files if any of the following are defined
                if find_main_file or skip_files:
                    file_list = self.client.get_files(
                        torrent_info.id)[torrent_info.id]

                    if options['post'].get('magnetization_timeout',
                                           0) > 0 and not file_list:
                        logger.debug(
                            'Waiting {} seconds for "{}" to magnetize',
                            options['post']['magnetization_timeout'],
                            entry['title'],
                        )
                        for _ in range(
                                options['post']['magnetization_timeout']):
                            sleep(1)
                            file_list = self.client.get_files(
                                torrent_info.id)[torrent_info.id]
                            if file_list:
                                total_size = self.client.get_torrent(
                                    torrent_info.id,
                                    ['id', 'totalSize']).totalSize
                                break
                        else:
                            logger.warning(
                                '"{}" did not magnetize before the timeout elapsed, file list unavailable for processing.',
                                entry['title'],
                            )

                    # Find files based on config
                    dl_list = []
                    skip_list = []
                    main_list = []
                    ext_list = ['*.srt', '*.sub', '*.idx', '*.ssa', '*.ass']

                    main_ratio = config['main_file_ratio']
                    if 'main_file_ratio' in options['post']:
                        main_ratio = options['post']['main_file_ratio']

                    for f in file_list:
                        # No need to set main_id if we're not going to need it
                        if find_main_file and file_list[f][
                                'size'] > total_size * main_ratio:
                            main_id = f

                        if 'include_files' in options['post']:
                            if any(
                                    fnmatch(file_list[f]['name'], mask) for
                                    mask in options['post']['include_files']):
                                dl_list.append(f)
                            elif options['post'].get('include_subs') and any(
                                    fnmatch(file_list[f]['name'], mask)
                                    for mask in ext_list):
                                dl_list.append(f)

                        if skip_files:
                            if any(
                                    fnmatch(file_list[f]['name'], mask)
                                    for mask in skip_files):
                                skip_list.append(f)

                    if main_id is not None:
                        # Look for files matching main ID title but with a different extension
                        if options['post'].get('rename_like_files'):
                            for f in file_list:
                                # if this filename matches main filename we want to rename it as well
                                fs = os.path.splitext(file_list[f]['name'])
                                if fs[0] == os.path.splitext(
                                        file_list[main_id]['name'])[0]:
                                    main_list.append(f)
                        else:
                            main_list = [main_id]

                        if main_id not in dl_list:
                            dl_list.append(main_id)
                    elif find_main_file:
                        logger.warning(
                            'No files in "{}" are > {:.0f}% of content size, no files renamed.',
                            entry['title'],
                            main_ratio * 100,
                        )

                    # If we have a main file and want to rename it and associated files
                    if 'content_filename' in options[
                            'post'] and main_id is not None:
                        if 'download_dir' not in options['add']:
                            download_dir = self.client.get_session(
                            ).download_dir
                        else:
                            download_dir = options['add']['download_dir']

                        # Get new filename without ext
                        file_ext = os.path.splitext(
                            file_list[main_id]['name'])[1]
                        file_path = os.path.dirname(
                            os.path.join(download_dir,
                                         file_list[main_id]['name']))
                        filename = options['post']['content_filename']
                        if config['host'] == 'localhost' or config[
                                'host'] == '127.0.0.1':
                            counter = 1
                            while os.path.exists(
                                    os.path.join(file_path,
                                                 filename + file_ext)):
                                # Try appending a (#) suffix till a unique filename is found
                                filename = '%s(%s)' % (
                                    options['post']['content_filename'],
                                    counter,
                                )
                                counter += 1
                        else:
                            logger.debug(
                                'Cannot ensure content_filename is unique '
                                'when adding to a remote transmission daemon.')

                        for index in main_list:
                            file_ext = os.path.splitext(
                                file_list[index]['name'])[1]
                            logger.debug(
                                'File {} renamed to {}',
                                file_list[index]['name'],
                                filename + file_ext,
                            )
                            # change to below when set_files will allow setting name, more efficient to have one call
                            # fl[index]['name'] = os.path.basename(pathscrub(filename + file_ext).encode('utf-8'))
                            try:
                                self.client.rename_torrent_path(
                                    torrent_info.id,
                                    file_list[index]['name'],
                                    os.path.basename(
                                        str(pathscrub(filename + file_ext))),
                                )
                            except TransmissionError:
                                logger.error(
                                    'content_filename only supported with transmission 2.8+'
                                )

                    if options['post'].get(
                            'main_file_only') and main_id is not None:
                        # Set Unwanted Files
                        options['change']['files_unwanted'] = [
                            x for x in file_list if x not in dl_list
                        ]
                        options['change']['files_wanted'] = dl_list
                        logger.debug(
                            'Downloading {} of {} files in torrent.',
                            len(options['change']['files_wanted']),
                            len(file_list),
                        )
                    elif (not options['post'].get('main_file_only')
                          or main_id is None) and skip_files:
                        # If no main file and we want to skip files

                        if len(skip_list) >= len(file_list):
                            logger.debug(
                                'skip_files filter would cause no files to be downloaded; '
                                'including all files in torrent.')
                        else:
                            options['change']['files_unwanted'] = skip_list
                            options['change']['files_wanted'] = [
                                x for x in file_list if x not in skip_list
                            ]
                            logger.debug(
                                'Downloading {} of {} files in torrent.',
                                len(options['change']['files_wanted']),
                                len(file_list),
                            )

                # Set any changed file properties
                if list(options['change'].keys()):
                    self.client.change_torrent(torrent_info.id, 30,
                                               **options['change'])

                if config['action'] == 'add':
                    # if add_paused was defined and set to False start the torrent;
                    # prevents downloading data before we set what files we want
                    start_paused = (
                        options['post']['paused']
                        if 'paused' in options['post'] else
                        not self.client.get_session().start_added_torrents)
                    if start_paused:
                        self.client.stop_torrent(torrent_info.id)
                    else:
                        self.client.start_torrent(torrent_info.id)
                elif config['action'] in ('remove', 'purge'):
                    self.client.remove_torrent(
                        [torrent_info.id],
                        delete_data=config['action'] == 'purge')
                    logger.info('{}d {} from transmission', config['action'],
                                torrent_info.name)
                elif config['action'] == 'pause':
                    self.client.stop_torrent([torrent_info.id])
                    logger.info('paused {} in transmission', torrent_info.name)
                elif config['action'] == 'resume':
                    self.client.start_torrent([torrent_info.id])
                    logger.info('resumed {} in transmission',
                                torrent_info.name)

            except TransmissionError as e:
                logger.opt(exception=True).debug('TransmissionError')
                logger.debug('Failed options dict: {}', options)
                msg = 'Error trying to {} {}, TransmissionError: {}'.format(
                    config['action'], entry['title'], e.message or 'N/A')
                logger.error(msg)
                continue
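
The include_files/skip_files handling above is plain fnmatch glob matching over the file names reported by transmission. A standalone sketch of that selection step (file names and masks are illustrative, not from the original):

from fnmatch import fnmatch

file_names = ['Show.S01E01.mkv', 'Show.S01E01.srt', 'sample/sample.mkv']
include_masks = ['*.mkv']
skip_masks = ['sample/*']

wanted = [n for n in file_names
          if any(fnmatch(n, m) for m in include_masks)
          and not any(fnmatch(n, m) for m in skip_masks)]
# wanted == ['Show.S01E01.mkv']
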
Example #3
    def _check_passwd(self) -> bool:
        if self.user.password != self.password:
            logger.error(f"{self.user.email} 登录密码错误")
            abort(403, message="密码错误")

        return True
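
One caveat about the check above: a plain `!=` on passwords assumes they are stored in clear text and is not constant-time. If a direct string comparison is really intended, the standard library offers a timing-safe variant; a sketch, not part of the original handler:

import secrets

def passwords_match(stored: str, supplied: str) -> bool:
    # Constant-time comparison; avoids leaking the position of the first mismatch.
    return secrets.compare_digest(stored, supplied)
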
Example #4
    def run(self):
        from ..autonlp import AutoNLP

        logger.info(f"Uploading files for project: {self._name}")
        client = AutoNLP()
        try:
            project = client.get_project(name=self._name)
        except ValueError:
            logger.error(f"Project {self._name} not found! You can create it using the create_project command.")
            sys.exit(1)
        splits = self._col_mapping.split(",")
        col_maps = {}
        for s in splits:
            k, v = s.split(":")
            col_maps[k] = v
        logger.info(f"Mapping: {col_maps}")

        files = self._files.split(",")
        try:
            project.upload(filepaths=files, split=self._split, col_mapping=col_maps, path_to_audio=self._path_to_audio)
            print(
                "🎉 Yupee! Your files have been uploaded.\n"
                f"Once you're done, starting a training here: {RED}autonlp train --project {project.name}{RST}"
            )
        except ValueError as err:
            logger.error("❌ Something went wrong!")
            logger.error("Details:")
            logger.error(str(err))
        except FileNotFoundError as err:
            logger.error("❌ One path you provided is invalid!")
            logger.error("Details:")
            logger.error(str(err))
        except InvalidFileError as err:
            logger.error("❌ Sorry, AutoNLP is not able to process the files you want to upload")
            logger.error("Details:")
            for line in str(err).splitlines():
                logger.error(line)
        except InvalidColMappingError as err:
            logger.error("❌ The column mapping you provided is incorrect!")
            logger.error("Details:")
            for line in str(err).splitlines():
                logger.error(line)
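
The column mapping above is parsed by splitting on commas and then colons. The same parsing can be written as a single dict comprehension; a sketch with an example mapping string (the column names are hypothetical):

col_mapping = "review:text,sentiment:target"  # e.g. the value held in self._col_mapping above
col_maps = {k: v for k, v in (pair.split(":") for pair in col_mapping.split(","))}
# col_maps == {'review': 'text', 'sentiment': 'target'}
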
Example #5
def Main():
    default_config = os.path.join(CCF_Etc, 'cchost.toml')
    parser = argparse.ArgumentParser(
        description='Script to enable the CCF network')

    parser.add_argument(
        '--logfile',
        help='Name of the log file, __screen__ for standard output',
        type=str)
    parser.add_argument('--loglevel',
                        help='Logging level',
                        default='WARNING',
                        type=str)
    parser.add_argument('--ccf-config',
                        help='Name of the CCF configuration file',
                        default=default_config,
                        type=str)
    parser.add_argument('--member-name',
                        help="Name of the member adding the user",
                        default="memberccf",
                        type=str)
    parser.add_argument('--user-name',
                        help="Name of the user being added",
                        default="userccf",
                        type=str)
    parser.add_argument('--add-node',
                        help="Add a new node to existing CCF network",
                        action="store_true")
    parser.add_argument('--node-id',
                        help="id of the node to be added to the ccf network",
                        type=int)

    options = parser.parse_args()

    # -----------------------------------------------------------------
    LOG.remove()
    if options.logfile == '__screen__':
        LOG.add(sys.stderr, level=options.loglevel)
    else:
        LOG.add(options.logfile)

    # -----------------------------------------------------------------
    try:
        config = toml.load(options.ccf_config)
    except Exception:
        LOG.error('unable to load ccf configuration file {0}'.format(
            options.ccf_config))
        sys.exit(-1)

    member_cert = os.path.join(CCF_Keys,
                               "{}_cert.pem".format(options.member_name))
    member_key = os.path.join(CCF_Keys,
                              "{}_privk.pem".format(options.member_name))
    network_cert = config["start"]["network-cert-file"]
    (host, port) = config["rpc-address"].split(':')

    try:
        member_client = CCFClient(host=host,
                                  port=port,
                                  cert=member_cert,
                                  key=member_key,
                                  ca=network_cert,
                                  format='json',
                                  prefix='gov',
                                  description="none",
                                  version="2.0",
                                  connection_timeout=3,
                                  request_timeout=3)
    except Exception:
        LOG.error('failed to connect to CCF service')
        sys.exit(-1)

    # Temporary fix to skip checking the CCF host certificate: in CCF 0.11.7 the certificate expiration was hardcoded to the end of 2021.
    member_client.client_impl.session.mount("https://", HTTPAdapter())
    member_client.client_impl.session.verify = False

    open_network_script(member_client, options, config)
    add_user_script(member_client, options, config)

    LOG.info('CCF network ready for use')
    sys.exit(0)
Example #6
    def save_url_image_to_path(self, state, data_url, path, state_config=None):
        """Saves URL image from data_url to the specified path.

        Parameters
        ----------
        state : str
            Two-letter abbreviation of the state or territory. Used for special-casing sizes, etc.

        data_url : str
            URL of data site to save

        path : str
            Local path to which to save .png screenshot of data_url

        state_config : dict
            If exists, this is a dict used for denoting phantomJScloud special casing or file type
        """

        # if we need to just download the file, don't use phantomjscloud
        if state_config and state_config.get('file'):
            if self.dry_run:
                logger.warning(f'Dry run: Downloading file from {data_url}')
                return

            logger.info(f"Downloading file from {data_url}")
            response = requests.get(data_url)
            if response.status_code == 200:
                with open(path, 'wb') as f:
                    f.write(response.content)
                return
            else:
                logger.error(f'Response status code: {response.status_code}')
                raise ValueError(f'Could not download data from URL: {data_url}')

        logger.info(f"Retrieving {data_url}")
        data = {
            'url': data_url,
            'renderType': 'png',
        }

        if state_config:
            # update data with state_config minus message
            state_config_copy = state_config.copy()
            message = state_config_copy.pop('message', None)
            for field in ['url', 'name']:  # we don't want to override the URL in case it's dynamic
                state_config_copy.pop(field, None)
            if message:
                logger.info(message)
            data.update(state_config_copy)

        # set maxWait if unset
        if 'requestSettings' in data:
            if 'maxWait' not in data['requestSettings']:
                data['requestSettings']['maxWait'] = 60000
        else:
            data['requestSettings'] = {'maxWait': 60000}

        if self.dry_run:
            logger.warning(f'Dry run: PhantomJsCloud request for {state} from {data_url}: {data}')
            return

        logger.info('Posting request %s...' % data)
        response = requests.post(self.phantomjs_url, json.dumps(data))
        logger.info('Done.')

        if response.status_code == 200:
            with open(path, 'wb') as f:
                f.write(response.content)
        else:
            logger.error(f'Response status code: {response.status_code}')
            if 'meta' in response.json():
                response_metadata = response.json()['meta']
                raise ValueError(f'Could not retrieve URL {data_url}, got response metadata {response_metadata}')
            else:
                raise ValueError(
                    'Could not retrieve URL %s and response has no metadata. Full response: %s' % (
                        data_url, response.json()))
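
A small design note on the PhantomJsCloud request above: requests.post(self.phantomjs_url, json.dumps(data)) passes the serialized JSON as the positional data= argument, so requests does not set a Content-Type: application/json header automatically. A sketch of the two common ways to send a JSON body (the endpoint URL is a placeholder):

import json
import requests

payload = {'url': 'https://example.com', 'renderType': 'png'}
endpoint = 'https://phantomjscloud.example/api/'  # placeholder

# Explicit serialization: set the header yourself.
requests.post(endpoint, data=json.dumps(payload),
              headers={'Content-Type': 'application/json'})

# Or let requests serialize the body and set the header via json=.
requests.post(endpoint, json=payload)
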
Example #7
    def run(self):

        # ---- Subscribe ----
        with self.neuron:

            # ---- Weights ----
            self.row = self.neuron.metagraph.row

            # --- Run state ---
            self.global_step = 0
            self.best_train_loss = math.inf

            # --- Loop forever ---
            for self.epoch in range(self.config.miner.n_epochs):
                try:
                    # ---- Serve ----
                    self.neuron.axon.serve(self.model)

                    # ---- Train Model ----
                    self.train()
                    self.scheduler.step()

                    # If model has borked for some reason, we need to make sure it doesn't emit weights
                    # Instead, reload into previous version of model
                    if torch.any(
                            torch.isnan(
                                torch.cat([
                                    param.view(-1)
                                    for param in self.model.parameters()
                                ]))):
                        self.model, self.optimizer = self.model_toolbox.load_model(
                            self.config)
                        continue

                    # ---- Emit row-weights ----
                    self.neuron.metagraph.set_weights(
                        self.row, wait_for_inclusion=True
                    )  # Sets my row-weights on the chain.

                    # ---- Sync metagraph ----
                    self.neuron.metagraph.sync(
                    )  # Pulls the latest metagraph state (with my update.)
                    self.row = self.neuron.metagraph.row

                    # --- Epoch logs ----
                    print(self.neuron.axon.__full_str__())
                    print(self.neuron.dendrite.__full_str__())
                    print(self.neuron.metagraph)

                    # ---- Update Tensorboard ----
                    self.neuron.dendrite.__to_tensorboard__(
                        self.tensorboard, self.global_step)
                    self.neuron.metagraph.__to_tensorboard__(
                        self.tensorboard, self.global_step)
                    self.neuron.axon.__to_tensorboard__(
                        self.tensorboard, self.global_step)

                    # ---- Save best loss and model ----
                    if self.training_loss and self.epoch % 10 == 0:
                        if self.training_loss < self.best_train_loss:
                            self.best_train_loss = self.training_loss  # update best train loss
                            self.model_toolbox.save_model(
                                self.config.miner.full_path, {
                                    'epoch':
                                    self.epoch,
                                    'model_state_dict':
                                    self.model.state_dict(),
                                    'loss':
                                    self.best_train_loss,
                                    'optimizer_state_dict':
                                    self.optimizer.state_dict(),
                                })
                            self.tensorboard.add_scalar(
                                'Neuron/Train_loss', self.training_loss,
                                self.global_step)

                # --- Catch Errors ----
                except Exception as e:
                    logger.error('Exception in training script with error: {}',
                                 e)
                    logger.info(traceback.format_exc())
                    logger.info('Continuing to train.')
                    time.sleep(1)
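
The NaN guard above flattens every parameter tensor into a single vector before testing it. The same check in isolation (the model here is a stand-in for self.model):

import torch

model = torch.nn.Linear(8, 2)  # stand-in for self.model
flat = torch.cat([param.view(-1) for param in model.parameters()])
if torch.any(torch.isnan(flat)):
    # A NaN anywhere in the weights: reload the last known-good checkpoint instead of emitting them.
    pass
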
Example #8
    def error(self, msg: str, exception: Exception = None):
        self.has_error = True
        logger.error(msg)
        if exception is not None:
            logger.exception(exception)
        self.messages.append(("ERROR", msg, exception))
Example #9
async def query_arm(ctx: Context, args: argparse.Namespace) -> None:
    logger.info(f"Starting enumeration for ARM - {ctx.cloud['ARM']}")

    async with SubscriptionClient(ctx.cred_async,
                                  base_url=ctx.cloud["ARM"]) as sub_client:
        async for tenant in sub_client.tenants.list():
            tenant_dict = tenant.as_dict()
            tenant_dict["subscriptions"] = []
            logger.info(
                f"Enumerating subscription and resource groups for tenant {tenant.tenant_id}"
            )

            # GET LIST OF SUBS.
            sub_list = []
            async for subscription in sub_client.subscriptions.list():
                if args.subs:
                    if subscription.subscription_id not in args.subs:
                        continue
                if args.nosubs:
                    if subscription.subscription_id in args.nosubs:
                        continue
                sub_list.append(subscription)

            if not sub_list:
                logger.error(f"No subscriptions found for {tenant.tenant_id}")
                continue

            # ENUMERATE MANAGEMENT CERTS
            if ctx.cloud["MGMT"]:
                certsTasks = [
                    asyncio.create_task(_query_management_certs(ctx, sub))
                    for sub in sub_list
                ]

                certs_output = OUTPUT_FOLDER / "certs.sqlite"

                for cert in asyncio.as_completed(certsTasks):
                    if await cert:
                        await sqlite_writer(certs_output, cert)

            # ENUMERATE RBAC
            executor = concurrent.futures.ThreadPoolExecutor(
                max_workers=len(sub_list))
            rbacTasks = {
                executor.submit(_query_rbac, ctx, sub)
                for sub in sub_list
            }

            backfills = {
                "User": set(),
                "Group": set(),
                "ServicePrincipal": set(),
                "Application": set(),
            }  # Dict of object IDs to hold for AAD enumeration

            rbac_output = OUTPUT_FOLDER / "rbac.sqlite"
            for rbac in concurrent.futures.as_completed(rbacTasks):
                if rbac.result():
                    for role in rbac.result():
                        await sqlite_writer(rbac_output, role)
                        if args.backfill:
                            backfills[role["principal_type"]].add(
                                role["principal_id"])

            # Only do backfill if azure argument is true (meaning specified on command line)
            if args.azure and args.backfill:
                await rbac_backfill(ctx, args, backfills)

            # ENUMERATE TENANT DATA
            subTasks = [
                asyncio.create_task(_query_subscription(ctx, sub))
                for sub in sub_list
            ]

            for result in asyncio.as_completed(subTasks):
                tenant_dict["subscriptions"].append(await result)

            tenant_output = OUTPUT_FOLDER / "tenant.sqlite"
            await sqlite_writer(tenant_output, tenant_dict)
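
The enumeration above fans out one task per subscription and then consumes results as they finish. A minimal self-contained sketch of the asyncio.as_completed pattern used for the certificate and tenant queries:

import asyncio

async def probe(i):
    await asyncio.sleep(0.01 * i)
    return i

async def main():
    tasks = [asyncio.create_task(probe(i)) for i in range(3)]
    for finished in asyncio.as_completed(tasks):
        print(await finished)  # results arrive in completion order, not submission order

asyncio.run(main())
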
Example #10
    def fetch_schema(client, prefix, file_prefix=None):
        if file_prefix is None:
            file_prefix = prefix
        api_response = client.get(f"/{prefix}/api")
        check(api_response,
              error=lambda status, msg: status == http.HTTPStatus.OK.value)

        response_body = api_response.body.json()
        paths = response_body["paths"]
        all_methods.extend(paths.keys())
        fetched_version = response_body["info"]["version"]

        formatted_schema = json.dumps(response_body, indent=2)
        openapi_target_file = os.path.join(args.schema_dir,
                                           f"{file_prefix}_openapi.json")

        try:
            old_schema.remove(openapi_target_file)
        except KeyError:
            pass

        with open(openapi_target_file, "a+", encoding="utf-8") as f:
            f.seek(0)
            previous = f.read()
            if previous != formatted_schema:
                file_version = "0.0.0"
                try:
                    from_file = json.loads(previous)
                    file_version = from_file["info"]["version"]
                except (json.JSONDecodeError, KeyError):
                    pass
                if version.parse(fetched_version) > version.parse(
                        file_version):
                    LOG.debug(
                        f"Writing schema to {openapi_target_file} - overwriting {file_version} with {fetched_version}"
                    )
                    f.truncate(0)
                    f.seek(0)
                    f.write(formatted_schema)
                else:
                    LOG.error(
                        f"Found differences in {openapi_target_file}, but not overwriting as retrieved version is not newer ({fetched_version} <= {file_version})"
                    )
                    alt_file = os.path.join(
                        args.schema_dir,
                        f"{file_prefix}_{fetched_version}_openapi.json")
                    LOG.error(f"Writing to {alt_file} for comparison")
                    with open(alt_file, "w", encoding="utf-8") as f2:
                        f2.write(formatted_schema)
                    try:
                        old_schema.remove(alt_file)
                    except KeyError:
                        pass
                changed_files.append(openapi_target_file)
            else:
                LOG.debug("Schema matches in {}".format(openapi_target_file))

        try:
            openapi_spec_validator.validate_spec(response_body)
        except Exception as e:
            LOG.error(f"Validation of {prefix} schema failed")
            LOG.error(e)
            return False

        return True
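
The overwrite decision above compares versions with version.parse (packaging-style parsing) rather than as plain strings, which matters once a component reaches two digits. A small illustration with example version strings:

from packaging import version

assert version.parse("1.10.0") > version.parse("1.9.0")  # components compared numerically
assert not ("1.10.0" > "1.9.0")                          # naive string comparison gets this wrong
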
Example #11
def run(args):
    os.makedirs(args.schema_dir, exist_ok=True)

    changed_files = []
    old_schema = set(
        os.path.join(dir_path, filename)
        for dir_path, _, filenames in os.walk(args.schema_dir)
        for filename in filenames)

    documents_valid = True
    all_methods = []

    def fetch_schema(client, prefix, file_prefix=None):
        if file_prefix is None:
            file_prefix = prefix
        api_response = client.get(f"/{prefix}/api")
        check(api_response,
              error=lambda status, msg: status == http.HTTPStatus.OK.value)

        response_body = api_response.body.json()
        paths = response_body["paths"]
        all_methods.extend(paths.keys())
        fetched_version = response_body["info"]["version"]

        formatted_schema = json.dumps(response_body, indent=2)
        openapi_target_file = os.path.join(args.schema_dir,
                                           f"{file_prefix}_openapi.json")

        try:
            old_schema.remove(openapi_target_file)
        except KeyError:
            pass

        with open(openapi_target_file, "a+", encoding="utf-8") as f:
            f.seek(0)
            previous = f.read()
            if previous != formatted_schema:
                file_version = "0.0.0"
                try:
                    from_file = json.loads(previous)
                    file_version = from_file["info"]["version"]
                except (json.JSONDecodeError, KeyError):
                    pass
                if version.parse(fetched_version) > version.parse(
                        file_version):
                    LOG.debug(
                        f"Writing schema to {openapi_target_file} - overwriting {file_version} with {fetched_version}"
                    )
                    f.truncate(0)
                    f.seek(0)
                    f.write(formatted_schema)
                else:
                    LOG.error(
                        f"Found differences in {openapi_target_file}, but not overwriting as retrieved version is not newer ({fetched_version} <= {file_version})"
                    )
                    alt_file = os.path.join(
                        args.schema_dir,
                        f"{file_prefix}_{fetched_version}_openapi.json")
                    LOG.error(f"Writing to {alt_file} for comparison")
                    with open(alt_file, "w", encoding="utf-8") as f2:
                        f2.write(formatted_schema)
                    try:
                        old_schema.remove(alt_file)
                    except KeyError:
                        pass
                changed_files.append(openapi_target_file)
            else:
                LOG.debug("Schema matches in {}".format(openapi_target_file))

        try:
            openapi_spec_validator.validate_spec(response_body)
        except Exception as e:
            LOG.error(f"Validation of {prefix} schema failed")
            LOG.error(e)
            return False

        return True

    with infra.network.network(args.nodes, args.binary_dir, args.debug_nodes,
                               args.perf_nodes) as network:
        network.start_and_open(args)
        primary, _ = network.find_primary()

        check = infra.checker.Checker()

        with primary.client("user0") as user_client:
            LOG.info("user frontend")
            if not fetch_schema(user_client, "app"):
                documents_valid = False

        with primary.client() as node_client:
            LOG.info("node frontend")
            if not fetch_schema(node_client, "node"):
                documents_valid = False

        with primary.client("member0") as member_client:
            LOG.info("member frontend")
            if not fetch_schema(member_client, "gov"):
                documents_valid = False

    made_changes = False

    if len(old_schema) > 0:
        LOG.error(
            "Removing old files which are no longer reported by the service:")
        for f in old_schema:
            LOG.error(" " + f)
            os.remove(f)
            f_dir = os.path.dirname(f)
            # Remove empty directories too
            while not os.listdir(f_dir):
                os.rmdir(f_dir)
                f_dir = os.path.dirname(f_dir)
        made_changes = True

    if len(changed_files) > 0:
        LOG.error("Found problems with the following schema files:")
        for f in changed_files:
            LOG.error(" " + f)
        made_changes = True

    if args.list_all:
        LOG.info("Discovered methods:")
        for method in sorted(set(all_methods)):
            LOG.info(f"  {method}")

    if made_changes or not documents_valid:
        assert False
Example #12
def main(
    exp_cfg,
    show=False,
    demo_output_folder='demo_output',
    pause=-1,
    focal_length=5000,
    sensor_width=36,
    save_vis=True,
    save_params=False,
    save_mesh=False,
    degrees=[],
):

    device = torch.device('cuda')
    if not torch.cuda.is_available():
        logger.error('CUDA is not available!')
        sys.exit(3)

    logger.remove()
    logger.add(lambda x: tqdm.write(x, end=''),
               level=exp_cfg.logger_level.upper(),
               colorize=True)

    demo_output_folder = osp.expanduser(osp.expandvars(demo_output_folder))
    logger.info(f'Saving results to: {demo_output_folder}')
    os.makedirs(demo_output_folder, exist_ok=True)

    model = SMPLXNet(exp_cfg)
    try:
        model = model.to(device=device)
    except RuntimeError:
        # Re-submit in case of a device error
        sys.exit(3)

    checkpoint_folder = osp.join(exp_cfg.output_folder,
                                 exp_cfg.checkpoint_folder)
    checkpointer = Checkpointer(model,
                                save_dir=checkpoint_folder,
                                pretrained=exp_cfg.pretrained)

    arguments = {'iteration': 0, 'epoch_number': 0}
    extra_checkpoint_data = checkpointer.load_checkpoint()
    for key in arguments:
        if key in extra_checkpoint_data:
            arguments[key] = extra_checkpoint_data[key]

    model = model.eval()

    means = np.array(exp_cfg.datasets.body.transforms.mean)
    std = np.array(exp_cfg.datasets.body.transforms.std)

    render = save_vis or show
    body_crop_size = exp_cfg.get('datasets', {}).get('body',
                                                     {}).get('transforms').get(
                                                         'crop_size', 256)
    if render:
        hd_renderer = HDRenderer(img_size=body_crop_size)

    dataloaders = make_all_data_loaders(exp_cfg, split='test')

    body_dloader = dataloaders['body'][0]

    total_time = 0
    cnt = 0
    for bidx, batch in enumerate(tqdm(body_dloader, dynamic_ncols=True)):

        full_imgs_list, body_imgs, body_targets = batch
        if full_imgs_list is None:
            continue

        full_imgs = to_image_list(full_imgs_list)
        body_imgs = body_imgs.to(device=device)
        body_targets = [target.to(device) for target in body_targets]
        full_imgs = full_imgs.to(device=device)

        torch.cuda.synchronize()
        start = time.perf_counter()
        model_output = model(body_imgs,
                             body_targets,
                             full_imgs=full_imgs,
                             device=device)
        torch.cuda.synchronize()
        elapsed = time.perf_counter() - start
        cnt += 1
        total_time += elapsed

        hd_imgs = full_imgs.images.detach().cpu().numpy().squeeze()
        body_imgs = body_imgs.detach().cpu().numpy()
        body_output = model_output.get('body')

        _, _, H, W = full_imgs.shape
        #  logger.info(f'{H}, {W}')
        #  H, W, _ = hd_imgs.shape
        if render:
            hd_imgs = np.transpose(undo_img_normalization(hd_imgs, means, std),
                                   [0, 2, 3, 1])
            hd_imgs = np.clip(hd_imgs, 0, 1.0)
            right_hand_crops = body_output.get('right_hand_crops')
            left_hand_crops = torch.flip(body_output.get('left_hand_crops'),
                                         dims=[-1])
            head_crops = body_output.get('head_crops')
            bg_imgs = undo_img_normalization(body_imgs, means, std)

            right_hand_crops = undo_img_normalization(right_hand_crops, means,
                                                      std)
            left_hand_crops = undo_img_normalization(left_hand_crops, means,
                                                     std)
            head_crops = undo_img_normalization(head_crops, means, std)

        body_output = model_output.get('body', {})
        num_stages = body_output.get('num_stages', 3)
        stage_n_out = body_output.get(f'stage_{num_stages - 1:02d}', {})
        model_vertices = stage_n_out.get('vertices', None)

        if stage_n_out is not None:
            model_vertices = stage_n_out.get('vertices', None)

        faces = stage_n_out['faces']
        if model_vertices is not None:
            model_vertices = model_vertices.detach().cpu().numpy()
            camera_parameters = body_output.get('camera_parameters', {})
            camera_scale = camera_parameters['scale'].detach()
            camera_transl = camera_parameters['translation'].detach()

        out_img = OrderedDict()

        final_model_vertices = None
        stage_n_out = model_output.get('body', {}).get('final', {})
        if stage_n_out is not None:
            final_model_vertices = stage_n_out.get('vertices', None)

        if final_model_vertices is not None:
            final_model_vertices = final_model_vertices.detach().cpu().numpy()
            camera_parameters = model_output.get('body', {}).get(
                'camera_parameters', {})
            camera_scale = camera_parameters['scale'].detach()
            camera_transl = camera_parameters['translation'].detach()

        hd_params = weak_persp_to_blender(
            body_targets,
            camera_scale=camera_scale,
            camera_transl=camera_transl,
            H=H,
            W=W,
            sensor_width=sensor_width,
            focal_length=focal_length,
        )

        if save_vis:
            bg_hd_imgs = np.transpose(hd_imgs, [0, 3, 1, 2])
            out_img['hd_imgs'] = bg_hd_imgs
        if render:
            # Render the initial predictions on the original image resolution
            hd_orig_overlays = hd_renderer(
                model_vertices,
                faces,
                focal_length=hd_params['focal_length_in_px'],
                camera_translation=hd_params['transl'],
                camera_center=hd_params['center'],
                bg_imgs=bg_hd_imgs,
                return_with_alpha=True,
            )
            out_img['hd_orig_overlay'] = hd_orig_overlays

        # Render the overlays of the final prediction
        if render:
            hd_overlays = hd_renderer(
                final_model_vertices,
                faces,
                focal_length=hd_params['focal_length_in_px'],
                camera_translation=hd_params['transl'],
                camera_center=hd_params['center'],
                bg_imgs=bg_hd_imgs,
                return_with_alpha=True,
                body_color=[0.4, 0.4, 0.7])
            out_img['hd_overlay'] = hd_overlays

        for deg in degrees:
            hd_overlays = hd_renderer(
                final_model_vertices,
                faces,
                focal_length=hd_params['focal_length_in_px'],
                camera_translation=hd_params['transl'],
                camera_center=hd_params['center'],
                bg_imgs=bg_hd_imgs,
                return_with_alpha=True,
                render_bg=False,
                body_color=[0.4, 0.4, 0.7],
                deg=deg,
            )
            out_img[f'hd_rendering_{deg:03.0f}'] = hd_overlays

        if save_vis:
            for key in out_img.keys():
                out_img[key] = np.clip(
                    np.transpose(out_img[key], [0, 2, 3, 1]) * 255, 0,
                    255).astype(np.uint8)

        for idx in tqdm(range(len(body_targets)), 'Saving ...'):
            fname = body_targets[idx].get_field('fname')
            curr_out_path = osp.join(demo_output_folder, fname)
            os.makedirs(curr_out_path, exist_ok=True)

            if save_vis:
                for name, curr_img in out_img.items():
                    pil_img.fromarray(curr_img[idx]).save(
                        osp.join(curr_out_path, f'{name}.png'))

            if save_mesh:
                # Store the mesh predicted by the body-crop network
                naive_mesh = o3d.geometry.TriangleMesh()
                naive_mesh.vertices = Vec3d(model_vertices[idx] +
                                            hd_params['transl'][idx])
                naive_mesh.triangles = Vec3i(faces)
                mesh_fname = osp.join(curr_out_path, f'body_{fname}.ply')
                o3d.io.write_triangle_mesh(mesh_fname, naive_mesh)

                # Store the final mesh
                expose_mesh = o3d.geometry.TriangleMesh()
                expose_mesh.vertices = Vec3d(final_model_vertices[idx] +
                                             hd_params['transl'][idx])
                expose_mesh.triangles = Vec3i(faces)
                mesh_fname = osp.join(curr_out_path, f'{fname}.ply')
                o3d.io.write_triangle_mesh(mesh_fname, expose_mesh)

            if save_params:
                params_fname = osp.join(curr_out_path, f'{fname}_params.npz')
                out_params = dict(fname=fname)
                for key, val in stage_n_out.items():
                    if torch.is_tensor(val):
                        val = val.detach().cpu().numpy()[idx]
                    out_params[key] = val
                for key, val in hd_params.items():
                    if torch.is_tensor(val):
                        val = val.detach().cpu().numpy()
                    if np.isscalar(val[idx]):
                        out_params[key] = val[idx].item()
                    else:
                        out_params[key] = val[idx]
                np.savez_compressed(params_fname, **out_params)

            if show:
                nrows = 1
                ncols = 4 + len(degrees)
                fig, axes = plt.subplots(ncols=ncols,
                                         nrows=nrows,
                                         num=0,
                                         gridspec_kw={
                                             'wspace': 0,
                                             'hspace': 0
                                         })
                axes = axes.reshape(nrows, ncols)
                for ax in axes.flatten():
                    ax.clear()
                    ax.set_axis_off()

                axes[0, 0].imshow(hd_imgs[idx])
                axes[0, 1].imshow(out_img['rgb'][idx])
                axes[0, 2].imshow(out_img['hd_orig_overlay'][idx])
                axes[0, 3].imshow(out_img['hd_overlay'][idx])
                start = 4
                for deg in degrees:
                    axes[0, start].imshow(
                        out_img[f'hd_rendering_{deg:03.0f}'][idx])
                    start += 1

                plt.draw()
                if pause > 0:
                    plt.pause(pause)
                else:
                    plt.show()

    logger.info(f'Average inference time: {total_time / cnt}')
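
The per-image parameter dump near the end writes one compressed .npz per frame. A minimal sketch of that save/load round trip (array contents are dummies):

import numpy as np

out_params = {'vertices': np.zeros((10, 3), dtype=np.float32), 'focal_length': 5000.0}
np.savez_compressed('example_params.npz', **out_params)

loaded = np.load('example_params.npz')
assert loaded['vertices'].shape == (10, 3)
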
Example #13
def run(get_command, args):
    if args.fixed_seed:
        seed(getpass.getuser())

    hosts = args.nodes
    if not hosts:
        hosts = ["localhost"] * number_of_local_nodes(args)

    LOG.info("Starting nodes on {}".format(hosts))

    with infra.ccf.network(
        hosts, args.binary_dir, args.debug_nodes, args.perf_nodes, pdb=args.pdb
    ) as network:
        network.start_and_join(args)
        primary, backups = network.find_nodes()

        command_args = get_command_args(args, get_command)

        if args.network_only:
            run_client(args, primary, command_args)
        else:
            nodes = filter_nodes(primary, backups, args.send_tx_to)
            clients = []
            client_hosts = args.client_nodes or ["localhost"]
            for client_id, client_host in enumerate(client_hosts):
                node = nodes[client_id % len(nodes)]
                remote_client = configure_remote_client(
                    args, client_id, client_host, node, command_args
                )
                clients.append(remote_client)

            for remote_client in clients:
                remote_client.start()

            hard_stop_timeout = 90

            try:
                with cimetrics.upload.metrics() as metrics:
                    tx_rates = infra.rates.TxRates(primary)
                    start_time = time.time()
                    while True:
                        stop_waiting = True
                        for i, remote_client in enumerate(clients):
                            done = remote_client.check_done()
                            # all the clients need to be done
                            LOG.info(
                                f"Client {i} has {'completed' if done else 'not completed'} running"
                            )
                            stop_waiting = stop_waiting and done
                        if stop_waiting:
                            break
                        if time.time() > start_time + hard_stop_timeout:
                            raise TimeoutError(
                                f"Client still running after {hard_stop_timeout}s"
                            )

                        time.sleep(1)

                    tx_rates.get_metrics()
                    for remote_client in clients:
                        remote_client.print_and_upload_result(args.label, metrics)
                        remote_client.stop()

                    LOG.info(f"Rates:\n{tx_rates}")
                    tx_rates.save_results(args.metrics_file)

            except Exception:
                LOG.error("Stopping clients due to exception")
                for remote_client in clients:
                    remote_client.stop()
                raise
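
The waiting loop above combines per-client completion checks with a hard stop after hard_stop_timeout seconds. The skeleton of that polling pattern, stripped of the client details:

import time

def wait_until(check_done, hard_stop_timeout=90, poll_interval=1):
    start_time = time.time()
    while True:
        if check_done():
            return
        if time.time() > start_time + hard_stop_timeout:
            raise TimeoutError(f"Still running after {hard_stop_timeout}s")
        time.sleep(poll_interval)
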
Example #14
def handle(update, context):

    util.log_chat("quotes", update)

    try:

        # Extract query...
        query = update.message.text
        query = query.split(" ")

        command = None

        try:
            command = query[1].lower()
        except IndexError:
            command = "random"

        if command == "add":
            if update.message.reply_to_message.text:
                response = add_quote(update)
                update.message.reply_text(text=response,
                                          parse_mode=ParseMode.MARKDOWN)

        elif command == "remove":
            if util.is_admin(update.message.from_user["id"]):
                try:
                    id_to_remove = query[2]
                except IndexError:
                    update.message.reply_text(
                        text="Please include the Quote ID you want to remove!",
                        parse_mode=ParseMode.MARKDOWN,
                    )
                    return
                response = remove_quote(id_to_remove)
            else:
                response = "Chal kat re bsdk!"

            update.message.reply_text(text=response,
                                      parse_mode=ParseMode.MARKDOWN)

        elif command == "get":

            response = ""

            try:
                quote_id = query[2]
            except IndexError:
                update.message.reply_text(
                    text="Please include the Quote ID you want to get!",
                    parse_mode=ParseMode.MARKDOWN,
                )
                return

            quote = get_quote_by_id(quote_id)

            if quote is None:

                group_id = util.get_group_id_from_update(update)
                if group_id is None:
                    update.message.reply_text(
                        text="Can't run this command here!")
                    return

                # Return a random quote
                random_quote = get_random_quote(group_id)

                if random_quote is None:
                    logger.info("[quotes] No quotes found! - group_id={}",
                                group_id)
                    update.message.reply_text(
                        text=
                        "No quotes found for this Group! You can add quotes with `/quotes add`",
                        parse_mode=ParseMode.MARKDOWN,
                    )
                    return

                response = "Couldn't find quote with ID `{}`... but here's a random one - \n".format(
                    quote_id)

            pretty_quote = generate_pretty_quote(quote)
            response = response + pretty_quote
            update.message.reply_text(text=response,
                                      parse_mode=ParseMode.MARKDOWN)

        elif command == "join":

            response = ""

            try:
                quote_id = query[2]
            except IndexError:
                update.message.reply_text(
                    text="Please include the Quote ID you want to join!",
                    parse_mode=ParseMode.MARKDOWN,
                )
                return

            quote = get_quote_by_id(quote_id)

            if quote is None:
                update.message.reply_text(
                    text="Couldn't find quote with ID `{}`!".format(quote_id),
                    parse_mode=ParseMode.MARKDOWN,
                )
                return

            response = join_quotes(quote, update)

            update.message.reply_text(text=response,
                                      parse_mode=ParseMode.MARKDOWN)

        else:

            # Enforce rate limiting on getting random quotes
            bakchod = dao.get_bakchod_by_id(update.message.from_user.id)
            history = bakchod.history

            if history is None:
                history = {}

            two_min_ago = datetime.datetime.now() - datetime.timedelta(
                minutes=2)

            if "random_quote_get" in history:
                last_time_get = ciso8601.parse_datetime(
                    history["random_quote_get"])
                if last_time_get > two_min_ago:
                    logger.info(
                        "[quotes] request random quote too soon... skipping")
                    update.message.reply_text(
                        "Quotes ki dukan band hai... come back later!")
                    return

            history["random_quote_get"] = datetime.datetime.now()
            bakchod.history = history
            dao.insert_bakchod(bakchod)

            group_id = util.get_group_id_from_update(update)
            if group_id is None:
                update.message.reply_text(text="Can't run this command here!")
                return

            # Return a random quote
            random_quote = get_random_quote(group_id)

            if random_quote is None:
                logger.info("[quotes] No quotes found! - group_id={}",
                            group_id)
                update.message.reply_text(
                    text=
                    "No quotes found for this Group! You can add quotes with `/quotes add`",
                    parse_mode=ParseMode.MARKDOWN,
                )
                return

            logger.info("[quotes] Got a random quote '{}", random_quote)

            pretty_quote = generate_pretty_quote(random_quote)

            try:
                quote_pics.generate_quote_pic(random_quote, update,
                                              pretty_quote)
            except Exception as e:
                logger.error(
                    "Caught error in generate_quote_pic - {} \n {}",
                    e,
                    traceback.format_exc(),
                )
                update.message.reply_text(text=pretty_quote,
                                          parse_mode=ParseMode.MARKDOWN)

            return

    except Exception as e:
        logger.error(
            "Caught Error in quotes.handle - {} \n {}",
            e,
            traceback.format_exc(),
        )
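# A minimal wiring sketch for the handler above, assuming python-telegram-bot
# v13-style Updater/Dispatcher; "BOT_TOKEN" and the function name are
# placeholders, not values from the original project.
from telegram.ext import CommandHandler, Updater


def run_quotes_bot():
    updater = Updater("BOT_TOKEN", use_context=True)
    updater.dispatcher.add_handler(CommandHandler("quotes", handle))
    updater.start_polling()
    updater.idle()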
Пример #15
def mkdir_recursive(path):
    try:
        os.makedirs(path, exist_ok=True)
    except Exception as e:
        logger.error("Exception: {}".format(e))
        exit(1)
Пример #16
    def forward(ctx, caller: Receptor, dummy: torch.Tensor, inputs: torch.Tensor, mode: bittensor.proto.Modality) -> Tuple[torch.Tensor, int]:  
        r""" Internal autograd-friendly Forward RPC call to a remote neuron (calls the Forward method on an Axon terminal.)

            Args:
                ctx: (:obj:`torch.autograd.ctx`, `required`):
                    Autograd context, saves state information between forward and backward calls. i.e. inputs for gradient computation.

                caller: (:obj:`Receptor`, `required`):
                    Caller object the remote neuron containing the endpoint information, RPC channel etc.

                dummy: (:obj:`torch.Tensor`, `required`):
                    Dummy torch tensor used to ensure that torch.backward computation is called on this function 
                    regardless of the input types.
  
                inputs (:obj:`List[torch.Tensor]` of shape :obj:`(shape)`, `required`):
                    Torch tensor to be sent to the caller associated endpoint neurons.

                mode (:obj:`bittensor.proto.Modality` of shape :obj:`(1)`, `required`):
                    Bittensor forward modality type. Enum in [TEXT, IMAGE, TENSOR]

            Returns:
                output (:obj:`Tuple[torch.FloatTensor, torch.LongTensor]`, `optional`):
                    Result from forward call. May be None in the case of failure.

                code (:obj:`bittensor.proto.ReturnCode`, `required`):
                    Return code associated with forward call.
        """
        # ---- Save for backward call ---
        ctx.caller = caller
        ctx.mode = mode
        ctx.inputs = inputs

        zeros = nill_response_for(inputs)
        try:
            # ---- Check inputs size ----
            if torch.numel(inputs) == 0:
                return zeros, torch.tensor(bittensor.proto.ReturnCode.EmptyRequest)

            # ---- Inputs Serialization ----
            try:
                serializer = serialization.get_serializer( bittensor.proto.Serializer.MSGPACK )
                serialized_inputs = serializer.serialize(inputs, modality = mode, from_type = bittensor.proto.TensorType.TORCH)
            except Exception as e:
                logger.warning('Serialization error with error {}', e)
                return zeros, torch.tensor(bittensor.proto.ReturnCode.RequestSerializationException)
            ctx.serialized_inputs =  serialized_inputs

            # ---- Build request ----
            request = bittensor.proto.TensorMessage(
                version = bittensor.__version__,
                public_key = ctx.caller.wallet.hotkey.public_key,
                nounce = ctx.caller.nounce,
                signature = ctx.caller.signature,
                tensors = [serialized_inputs])
        
            # ---- Make RPC call ----
            try:
                start_time = time.time()
                ctx.caller.stats.forward_qps.update(1)
                ctx.caller.stats.forward_bytes_out.update(sys.getsizeof(request))
                logger.debug('<white>Dendrite</white> <green>Forward Request</green> ---> <white>to</white>:{}, <white>inputs</white>:{}, <white>mode</white>:{}', caller.endpoint, inputs.shape, mode)
                response = ctx.caller.stub.Forward(request, timeout=caller.config.receptor.timeout)
                ctx.caller.stats.forward_bytes_in.update(sys.getsizeof(response))
                ctx.caller.stats.forward_elapsed_time.update((time.time() - start_time))

                # Get message
                try:
                    response_message = response.message
                except Exception:
                    response_message = ''

                # ---- Catch non-code ----
                try:
                    bittensor_code = response.return_code
                except Exception:
                    logger.debug('<white>Dendrite</white> <green>Forward Response</green> <--- <white>code</white>:<yellow>UnknownException</yellow>, <white>from</white>:{}, <white>message</white>:<red>{}</red>', caller.endpoint, response_message)
                    return zeros, torch.tensor(bittensor.proto.ReturnCode.UnknownException)

                # ---- Catch bittensor errors ----
                if bittensor_code == bittensor.proto.ReturnCode.UnknownException:
                    logger.debug('<white>Dendrite</white> <green>Forward Response</green> <--- <white>code</white>:<yellow>UnknownException</yellow>, <white>from</white>:{}, message:<red>{}</red>', caller.endpoint, response_message)
                    return zeros, torch.tensor(bittensor_code)

                elif bittensor_code != bittensor.proto.ReturnCode.Success:
                    logger.debug('<white>Dendrite</white> <green>Forward Response</green> <--- <white>code</white>:<yellow>{}</yellow>, <white>from</white>:{}, message:<red>{}</red>', bittensor_code, caller.endpoint,response_message)
                    return zeros, torch.tensor(bittensor_code)

            # ---- Catch GRPC Errors ----
            except grpc.RpcError as rpc_error_call:
                grpc_code = rpc_error_call.code()

                if grpc_code == grpc.StatusCode.DEADLINE_EXCEEDED:
                    logger.debug('<white>Dendrite</white> <green>Forward Response</green> <--- <white>code</white>:<yellow>Timeout</yellow>, <white>from</white>:{}', caller.endpoint )
                    return zeros, torch.tensor(bittensor.proto.ReturnCode.Timeout)

                elif grpc_code == grpc.StatusCode.UNAVAILABLE:
                    logger.debug('<white>Dendrite</white> <green>Forward Response</green> <--- <white>code</white>:<yellow>Unavailable</yellow>, <white>from</white>:{}', caller.endpoint )
                    return zeros, torch.tensor(bittensor.proto.ReturnCode.Unavailable)

                else:
                    logger.debug('<white>Dendrite</white> <green>Forward Response</green> <--- <white>code</white>:<red>UnknownException</red>, <white>from</white>:{} ', caller.endpoint )
                    return zeros, torch.tensor(bittensor.proto.ReturnCode.UnknownException)

            # ---- Catch Unknown Errors ----
            except Exception as e:
                logger.debug('<white>Dendrite</white> <green>Forward Response</green> <--- <white>code</white>:<red>UnknownException</red>, <white>from</white>:{}, <white>message</white>:<red>{}</red>', caller.endpoint, e)
                return zeros, torch.tensor(bittensor.proto.ReturnCode.UnknownException)

            # ---- Check tensor response length ----
            if len(response.tensors) == 0:
                logger.debug('<white>Dendrite</white> <green>Forward Response</green> <--- <white>code</white>:<yellow>EmptyResponse</yellow>, <white>from</white>:{}', caller.endpoint )
                return zeros, torch.tensor(bittensor.proto.ReturnCode.EmptyResponse)

            # ---- Deserialize response ----
            try:
                outputs = response.tensors[0]
                deserializer = serialization.get_serializer(  outputs.serializer )
                outputs = deserializer.deserialize( outputs, to_type = bittensor.proto.TensorType.TORCH )

            except Exception as e:
                logger.debug('<white>Dendrite</white> <green>Forward Response</green> <--- <white>code</white>:<red>ResponseDeserializationException</red>, <white>from</white>:{}, message:<red>{}</red>', caller.endpoint, e)
                return zeros, torch.tensor(bittensor.proto.ReturnCode.ResponseDeserializationException)
        
            # ---- Check response shape ----
            if  outputs.size(0) != inputs.size(0) \
                or outputs.size(1) != inputs.size(1) \
                or outputs.size(2) != bittensor.__network_dim__:
                    logger.debug('<white>Dendrite</white> <green>Forward Response</green> <--- <white>code</white>:<red>ResponseShapeException</red>, <white>from</white>:{}, <white>shape</white>:{}, <white>expected</white>:{}', caller.endpoint, list(outputs.shape), [inputs.size(0), inputs.size(1), bittensor.__network_dim__])
                    return zeros, torch.tensor(bittensor.proto.ReturnCode.ResponseShapeException)

            # ---- Safe catch NaNs and replace with 0.0 ----
            outputs = torch.where(torch.isnan(outputs), torch.zeros_like(outputs), outputs)
            
        # ---- Catch all ----
        except Exception as e:
            logger.error('<white>Dendrite</white> <green>Forward Response</green> <--- <white>code</white>:<red>UnknownException</red>, <white>from</white>:{}, <white>message</white>:<red>{}</red>', caller.endpoint, e)
            return zeros, torch.tensor(bittensor.proto.ReturnCode.UnknownException)

        # ---- Return ----
        logger.debug('<white>Dendrite</white> <green>Forward Response</green> <--- <white>code</white>:<green>Success</green>, <white>from</white>:{}, <white>outputs</white>:{}', caller.endpoint, outputs.shape)
        return outputs, torch.tensor(response.return_code)
Пример #17
def install_agent(request):
    from knox.models import AuthToken

    client_id = request.data["client"]
    site_id = request.data["site"]
    version = settings.LATEST_AGENT_VER
    arch = request.data["arch"]

    # response type is blob so we have to use
    # status codes and render error message on the frontend
    if arch == "64" and not os.path.exists(
            os.path.join(settings.EXE_DIR, "meshagent.exe")):
        return Response(status=status.HTTP_406_NOT_ACCEPTABLE)

    if arch == "32" and not os.path.exists(
            os.path.join(settings.EXE_DIR, "meshagent-x86.exe")):
        return Response(status=status.HTTP_415_UNSUPPORTED_MEDIA_TYPE)

    inno = (f"winagent-v{version}.exe"
            if arch == "64" else f"winagent-v{version}-x86.exe")
    download_url = settings.DL_64 if arch == "64" else settings.DL_32

    _, token = AuthToken.objects.create(
        user=request.user, expiry=dt.timedelta(hours=request.data["expires"]))

    if request.data["installMethod"] == "exe":
        from tacticalrmm.utils import generate_winagent_exe

        return generate_winagent_exe(
            client=client_id,
            site=site_id,
            agent_type=request.data["agenttype"],
            rdp=request.data["rdp"],
            ping=request.data["ping"],
            power=request.data["power"],
            arch=arch,
            token=token,
            api=request.data["api"],
            file_name=request.data["fileName"],
        )

    elif request.data["installMethod"] == "manual":
        cmd = [
            inno,
            "/VERYSILENT",
            "/SUPPRESSMSGBOXES",
            "&&",
            "ping",
            "127.0.0.1",
            "-n",
            "5",
            "&&",
            r'"C:\Program Files\TacticalAgent\tacticalrmm.exe"',
            "-m",
            "install",
            "--api",
            request.data["api"],
            "--client-id",
            client_id,
            "--site-id",
            site_id,
            "--agent-type",
            request.data["agenttype"],
            "--auth",
            token,
        ]

        if int(request.data["rdp"]):
            cmd.append("--rdp")
        if int(request.data["ping"]):
            cmd.append("--ping")
        if int(request.data["power"]):
            cmd.append("--power")

        resp = {
            "cmd": " ".join(str(i) for i in cmd),
            "url": download_url,
        }

        return Response(resp)

    elif request.data["installMethod"] == "powershell":

        ps = os.path.join(settings.BASE_DIR, "core/installer.ps1")

        with open(ps, "r") as f:
            text = f.read()

        replace_dict = {
            "innosetupchange": inno,
            "clientchange": str(client_id),
            "sitechange": str(site_id),
            "apichange": request.data["api"],
            "atypechange": request.data["agenttype"],
            "powerchange": str(request.data["power"]),
            "rdpchange": str(request.data["rdp"]),
            "pingchange": str(request.data["ping"]),
            "downloadchange": download_url,
            "tokenchange": token,
        }

        for i, j in replace_dict.items():
            text = text.replace(i, j)

        file_name = "rmm-installer.ps1"
        ps1 = os.path.join(settings.EXE_DIR, file_name)

        if os.path.exists(ps1):
            try:
                os.remove(ps1)
            except Exception as e:
                logger.error(str(e))

        with open(ps1, "w") as f:
            f.write(text)

        if settings.DEBUG:
            with open(ps1, "r") as f:
                response = HttpResponse(f.read(), content_type="text/plain")
                response[
                    "Content-Disposition"] = f"inline; filename={file_name}"
                return response
        else:
            response = HttpResponse()
            response[
                "Content-Disposition"] = f"attachment; filename={file_name}"
            response["X-Accel-Redirect"] = f"/private/exe/{file_name}"
            return response
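# A hedged sketch of the request body this view reads, based only on the
# request.data keys accessed above; every value below is illustrative, not a
# default from the original project.
EXAMPLE_INSTALL_AGENT_PAYLOAD = {
    "client": 1,
    "site": 2,
    "arch": "64",                   # or "32"
    "expires": 24,                  # token expiry in hours
    "installMethod": "manual",      # "exe", "manual" or "powershell"
    "agenttype": "server",
    "rdp": 0,
    "ping": 0,
    "power": 0,
    "api": "https://api.example.com",
    "fileName": "rmm-installer.exe",
}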
Пример #18
 def print(self):
     if self.has_error:
         logger.error("Could not complete run")
Пример #19
    def _make_torrent_options_dict(self, config, entry):

        opt_dic = {}

        for opt_key in (
                'path',
                'add_paused',
                'honor_limits',
                'bandwidth_priority',
                'max_connections',
                'max_up_speed',
                'max_down_speed',
                'ratio',
                'main_file_only',
                'main_file_ratio',
                'magnetization_timeout',
                'include_subs',
                'content_filename',
                'include_files',
                'skip_files',
                'rename_like_files',
                'queue_position',
        ):
            # Values do not merge config with task
            # Task takes priority then config is used
            if opt_key in entry:
                opt_dic[opt_key] = entry[opt_key]
            elif opt_key in config:
                opt_dic[opt_key] = config[opt_key]

        options = {'add': {}, 'change': {}, 'post': {}}

        add = options['add']
        if opt_dic.get('path'):
            try:
                path = os.path.expanduser(entry.render(opt_dic['path']))
            except RenderError as e:
                logger.error('Error setting path for {}: {}', entry['title'],
                             e)
            else:
                # Transmission doesn't like it when paths end in a separator
                path = path.rstrip('\\/')
                add['download_dir'] = pathscrub(path)
        # make sure we add it paused, will modify status after adding
        add['paused'] = True

        change = options['change']
        if 'bandwidth_priority' in opt_dic:
            change['bandwidthPriority'] = opt_dic['bandwidth_priority']
        if 'honor_limits' in opt_dic and not opt_dic['honor_limits']:
            change['honorsSessionLimits'] = False
        if 'max_up_speed' in opt_dic:
            change['uploadLimit'] = opt_dic['max_up_speed']
            change['uploadLimited'] = True
        if 'max_down_speed' in opt_dic:
            change['downloadLimit'] = opt_dic['max_down_speed']
            change['downloadLimited'] = True
        if 'max_connections' in opt_dic:
            change['peer_limit'] = opt_dic['max_connections']

        if 'ratio' in opt_dic:
            change['seedRatioLimit'] = opt_dic['ratio']
            if opt_dic['ratio'] == -1:
                # seedRatioMode:
                # 0 follow the global settings
                # 1 override the global settings, seeding until a certain ratio
                # 2 override the global settings, seeding regardless of ratio
                change['seedRatioMode'] = 2
            else:
                change['seedRatioMode'] = 1

        if 'queue_position' in opt_dic:
            change['queuePosition'] = opt_dic['queue_position']

        post = options['post']
        # set to modify paused status after
        if 'add_paused' in opt_dic:
            post['paused'] = opt_dic['add_paused']
        if 'main_file_only' in opt_dic:
            post['main_file_only'] = opt_dic['main_file_only']
        if 'main_file_ratio' in opt_dic:
            post['main_file_ratio'] = opt_dic['main_file_ratio']
        if 'magnetization_timeout' in opt_dic:
            post['magnetization_timeout'] = opt_dic['magnetization_timeout']
        if 'include_subs' in opt_dic:
            post['include_subs'] = opt_dic['include_subs']
        if 'content_filename' in opt_dic:
            try:
                post['content_filename'] = entry.render(
                    opt_dic['content_filename'])
            except RenderError as e:
                logger.error('Unable to render content_filename {}: {}',
                             entry['title'], e)
        if 'skip_files' in opt_dic:
            post['skip_files'] = opt_dic['skip_files']
            if not isinstance(post['skip_files'], list):
                post['skip_files'] = [post['skip_files']]
        if 'include_files' in opt_dic:
            post['include_files'] = opt_dic['include_files']
            if not isinstance(post['include_files'], list):
                post['include_files'] = [post['include_files']]
        if 'rename_like_files' in opt_dic:
            post['rename_like_files'] = opt_dic['rename_like_files']
        return options
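    # Illustrative `config`/entry values for the options handled above; the keys
    # are taken from the opt_key tuple, the values are examples only (a sketch,
    # not the plugin's documented defaults):
    #
    #   {'path': '~/downloads/{{ series_name }}',
    #    'add_paused': False,
    #    'honor_limits': True,
    #    'max_up_speed': 500,
    #    'max_connections': 50,
    #    'ratio': 2.0,
    #    'main_file_only': True}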
Пример #20
 async def cog_command_error(self, ctx: commands.Context,
                             error: commands.CommandError) -> None:
     logger.error(error)
     logger.error(traceback.format_exc())
     await ctx.send(f"{ctx.author.mention}, {error}")
Пример #21
# NOTE: imports added for completeness; `load_dotenv` comes from python-dotenv
# and `environ` from the standard library.
from os import environ

from dotenv import load_dotenv

load_dotenv()
environ["no_proxy"] = "*"

from auth_swust import Login
from loguru import logger

from src.gen_ics import generate_class_schedule
from src.parse_api import get_course_api


# Login section
# `_` is assumed to be an environment lookup helper (e.g. os.getenv) defined
# elsewhere in the original project.
username = _("c2i_username")
password = _("c2i_password")
path = _("c2i_path", "./")

if not username or not password:
    raise ValueError("Username and password are not set")

username_li = username.split(",")
password_li = password.split(",")

for u, p in zip(username_li, password_li):
    login = Login(u, p)
    res, _ = login.try_login()
    if res:
        sess = login.sess
        api = get_course_api(sess)
        generate_class_schedule(api, u, path)
    else:
        logger.error(f"Failed to generate calendar for {u}!")
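# Example .env file consumed by load_dotenv() above (values are illustrative);
# multiple accounts are comma-separated, matching the split(",") logic:
#   c2i_username=20190001,20190002
#   c2i_password=password1,password2
#   c2i_path=./calendars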
Пример #22
 def error(update, context):
     logger.warning('Update "%s" caused error "%s"', update, context.error)
     logger.error(traceback.format_exc())
Пример #23
    def search(self, task, entry, config=None):
        """
        Search for name from piratebay.
        """
        if not isinstance(config, dict):
            config = {}
        self.set_urls(config.get('url', URL))
        sort = SORT.get(config.get('sort_by', 'seeds'))
        if config.get('sort_reverse'):
            sort += 1
        if isinstance(config.get('category'), int):
            category = config['category']
        else:
            category = CATEGORIES.get(config.get('category', 'all'))
        filter_url = '/0/%d/%d' % (sort, category)

        entries = set()
        for search_string in entry.get('search_strings', [entry['title']]):
            query = normalize_unicode(search_string)

            # TPB search doesn't like dashes or quotes
            query = query.replace('-', ' ').replace("'", " ")

            # urllib.quote will crash if the unicode string has non ascii characters, so encode in utf-8 beforehand
            url = '%s/search/%s%s' % (self.url, quote(
                query.encode('utf-8')), filter_url)
            logger.debug('Using {} as piratebay search url', url)
            page = task.requests.get(url).content
            soup = get_soup(page)
            for link in soup.find_all('a', attrs={'class': 'detLink'}):
                entry = Entry()
                entry['title'] = self.extract_title(link)
                if not entry['title']:
                    logger.error(
                        'Malformed search result. No title or url found. Skipping.'
                    )
                    continue
                href = link.get('href')
                if href.startswith('/'):  # relative link?
                    href = self.url + href
                entry['url'] = href
                row = link.parent.parent.parent
                description = row.find_all('a', attrs={'class': 'detDesc'})
                if description and description[0].contents[0] == "piratebay ":
                    logger.debug('Advertisement entry. Skipping.')
                    continue
                tds = row.find_all('td')
                entry['torrent_seeds'] = int(tds[-2].contents[0])
                entry['torrent_leeches'] = int(tds[-1].contents[0])
                entry['torrent_availability'] = torrent_availability(
                    entry['torrent_seeds'], entry['torrent_leeches'])
                # Parse content_size
                size_text = link.find_next(attrs={
                    'class': 'detDesc'
                }).get_text()
                if size_text:
                    size = re.search(r'Size (\d+(\.\d+)?\xa0(?:[PTGMK])?i?B)',
                                     size_text)
                    if size:
                        entry['content_size'] = parse_filesize(size.group(1))
                    else:
                        logger.error(
                            'Malformed search result? Title: "{}", No size? {}',
                            entry['title'],
                            size_text,
                        )

                entries.add(entry)

        return sorted(entries,
                      reverse=True,
                      key=lambda x: x.get('torrent_availability'))
Пример #24
    def start_server_over_ssh(self):
        try:
            client = SSHClient()
            client.load_system_host_keys()
            client.set_missing_host_key_policy(WarningPolicy())
            client.connect(
                self.ssh_host,
                port=self.ssh_port,
                username=self.ssh_user,
                password=self.text,
            )
            transport = client.get_transport()
            ip, _ = transport.getpeername()
            if ip:
                self.update_ip_linedt_signal.emit(ip)
                logger.info(f"IP for {self.ssh_host} detected as {ip}.")
            ws_name = self.run_config["workspace_name"]
            server_port = self.run_config["server_port"]
            # TODO Check if the server port is already in use
            logger.info(
                f"Checking if server port: {server_port} at ip: {ip} is already in use."
            )
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            result = sock.connect_ex((ip, int(server_port)))
            if result == 0:
                logger.error(f"Port {server_port} is already open.")
                self.button_message_signal.emit(
                    [f"Port: {server_port} at ip: {ip} is already in use!", "maroon", 3]
                )
                self.error_signal.emit()
                sock.close()
                client.close()
                return
            else:
                logger.info(f"Port {server_port} is not open.")
            sock.close()
            cuda_command = "module load cuda/10.1\n"
            command = (
                "/dls_sw/apps/SuRVoS2/s2_conda/bin/python -u "
                "/dls/science/groups/das/SuRVoS/s2/s2_dec/SuRVoS2/survos.py "
                f"start_server {ws_name} {server_port} > {date.today()}_survos2.log &\n"
            )
            logger.info(f"Running command on remote machine: {command}")

            session = transport.open_session()
            session.setblocking(0)  # Set to non-blocking mode
            session.get_pty()
            session.invoke_shell()
            # Send commands
            session.send(cuda_command)
            session.send(command)
            # Loop for 15 seconds
            self.button_message_signal.emit(
                [f"Starting server on {self.ssh_host}. Please Wait!", "navy", 14]
            )
            start = time.time()
            while time.time() - start < 15:
                if session.recv_ready():
                    data = session.recv(512)
                    print(data.decode(), flush=True)
                time.sleep(1)  # Yield CPU so we don't take up 100% usage...
            self.finished.emit()

        except AuthenticationException:
            logger.error("SSH Authentication failed!")
            self.button_message_signal.emit(["Incorrect Password!", "maroon", 3])
            self.error_signal.emit()
Пример #25
    async def is_sqlite(self, file: Path):
        async with aiofiles.open(str(file), "rb") as f:
            sixteen = await f.read(16)
            return sixteen == b"SQLite format 3\000"

    @logger.catch
    async def process_sqlite(self, sql_file: Path):
        logger.info(f"Processing {sql_file.stem} ")
        try:
            async with aiosqlite.connect(sql_file) as db:
                async with db.execute("SELECT * from results") as cursor:
                    while row := await cursor.fetchone():
                        await self._process_json(row[1])
            logger.info(f"Finished processing {sql_file.name} ")
        except Exception as e:
            logger.error(e)

    async def process(self, upload: SpooledTemporaryFile, filename: str,
                      neo_user: str, neo_pass: str):

        server = ("bolt://stormspotter-neo4j:7687"
                  if os.environ.get("DOCKER_STORMSPOTTER") else
                  "bolt://localhost:7687")
        # TODO: Pass whole neo4j params from frontend or use .env for server
        self.neo = Neo4j(server=server, user=neo_user, password=neo_pass)
        if zipfile.is_zipfile(upload):
            self.status = f"Unzipping {filename}"
            tempdir = mkdtemp()
            zipfile.ZipFile(upload).extractall(tempdir)
            sqlite_files = [
                f for f in Path(tempdir).glob("*") if await self.is_sqlite(f)
Пример #26
def main():
    # Launching appium
    parser = argparse.ArgumentParser(
        description='instr = True to collect code coverage; algo = RL to use RL, '
                    'or random to choose the random approach')
    parser.add_argument('--timesteps', type=int, default=3600)
    parser.add_argument('--iterations', type=int, default=10)
    parser.add_argument('--instr_jacoco', default=False, action='store_true')
    parser.add_argument('--instr_emma', default=False, action='store_true')
    parser.add_argument('--save_policy', default=False, action='store_true')
    parser.add_argument('--real_device', default=False, action='store_true')
    parser.add_argument('--rotation', default=False, action='store_true')
    parser.add_argument('--internet', default=False, action='store_true')
    parser.add_argument('--menu', default=False, action='store_true')
    parser.add_argument('--algo',
                        choices=['TD3', 'SAC', 'random', 'Q', 'DDPG', 'test'],
                        type=str,
                        required=True)
    parser.add_argument('--emu',
                        choices=['normal', 'headless'],
                        type=str,
                        required=False,
                        default='normal')
    parser.add_argument('--appium_port', type=int, required=True)
    parser.add_argument('--platform_name',
                        choices=['Android', 'iOS'],
                        type=str,
                        default='Android')
    parser.add_argument('--platform_version', type=str, default='9.0')
    parser.add_argument('--udid', type=str, default='emulator-5554')
    parser.add_argument('--device_name', type=str, default='test0')
    parser.add_argument('--android_port', type=str, default='5554')
    parser.add_argument('--apps', type=str, required=True)
    parser.add_argument('--timer', type=int, default=60)
    parser.add_argument('--max_timesteps', type=int, default=250)
    parser.add_argument('--pool_strings', type=str, default='strings.txt')
    parser.add_argument('--trials_per_app', type=int, default=3)

    args = parser.parse_args()
    save_policy = args.save_policy
    max_trials = args.trials_per_app
    if max_trials <= 0:
        raise Exception('max_trials must be > 0')
    timesteps = args.timesteps
    max_timesteps = args.max_timesteps
    pool_strings = args.pool_strings
    N = args.iterations
    instr_jacoco = args.instr_jacoco
    instr_emma = args.instr_emma
    if instr_emma and instr_jacoco:
        raise AssertionError
    real_device = args.real_device
    algo = args.algo
    emu = args.emu
    appium_port = args.appium_port
    platform_name = args.platform_name
    platform_version = args.platform_version
    udid = args.udid
    # Check this in case of name error
    device_name = args.device_name.replace('_', ' ')
    rotation = args.rotation
    internet = args.internet
    merdoso_button_menu = args.menu
    android_port = args.android_port
    apps = [p for p in args.apps.split(",")]
    timer = args.timer

    if emu == 'normal':
        is_headless = False
    else:
        is_headless = True

    # Put all APKs in folder apps

    # path = os.path.join(os.getcwd(), app_path)
    my_log = logger.add(
        os.path.join('logs', 'logger.log'),
        format="{time} {level} {message}",
        filter=lambda record: record["level"].name in ("INFO", "ERROR"))

    appium = AppiumLauncher(appium_port)
    if real_device:
        emulator = None
    else:
        emulator = EmulatorLauncher(emu, device_name, android_port)

    if len(apps) == 0:
        raise Exception('The folder is empty or the path is wrong')
    for application in apps:
        app_name = os.path.basename(os.path.splitext(application)[0])
        logger.info(f'now testing: {app_name}\n')
        cycle = 0
        trial = 0
        coverage_dict_template = {}
        try:
            exported_activities, services, receivers, providers, string_activities, my_package = apk_analyzer.analyze(
                application, coverage_dict_template)
            ready = True
        except Exception as e:
            logger.error(f'{e} at app: {application}')
            ready = False
        if ready:
            package = None
            while cycle < N:
                logger.info(f'app: {app_name}, test {cycle} of {N} starting')
                # coverage dir
                coverage_dir = ''
                if instr_emma or instr_jacoco:
                    coverage_dir = os.path.join(os.getcwd(), 'coverage',
                                                app_name, algo, str(cycle))
                    os.makedirs(coverage_dir, exist_ok=True)
                # logs dir
                log_dir = os.path.join(os.getcwd(), 'logs', app_name, algo,
                                       str(cycle))
                os.makedirs(log_dir, exist_ok=True)
                policy_dir = os.path.join(os.getcwd(), 'policies', app_name,
                                          algo)
                os.makedirs(policy_dir, exist_ok=True)
                # instantiating timer in minutes
                coverage_dict = dict(coverage_dict_template)
                widget_list = []
                bug_set = set()
                visited_activities = []
                clicked_buttons = []
                number_bugs = []

                os.system(f'adb -s {udid} install -t -r {application}')
                result = subprocess.run([
                    "adb", "shell", "su", "0", "find", "/data/data/", "-type",
                    "d", "-name", f'"{my_package}*"'
                ],
                                        capture_output=True)
                package = result.stdout.decode('utf-8').strip('\n').rsplit(
                    '/')[-1]

                try:
                    app = RLApplicationEnv(
                        coverage_dict,
                        app_path=application,
                        list_activities=list(coverage_dict.keys()),
                        widget_list=widget_list,
                        bug_set=bug_set,
                        coverage_dir=coverage_dir,
                        log_dir=log_dir,
                        visited_activities=visited_activities,
                        clicked_buttons=clicked_buttons,
                        number_bugs=number_bugs,
                        string_activities=string_activities,
                        appium_port=appium_port,
                        internet=internet,
                        instr_emma=instr_emma,
                        instr_jacoco=instr_jacoco,
                        merdoso_button_menu=merdoso_button_menu,
                        rotation=rotation,
                        platform_name=platform_name,
                        platform_version=platform_version,
                        udid=udid,
                        pool_strings=pool_strings,
                        device_name=device_name,
                        max_episode_len=max_timesteps,
                        is_headless=is_headless,
                        appium=appium,
                        emulator=emulator,
                        package=package,
                        exported_activities=exported_activities,
                        services=services,
                        receivers=receivers)
                    if algo == 'TD3':
                        algorithm = TD3Algorithm()
                    elif algo == 'random':
                        algorithm = RandomAlgorithm()
                    elif algo == 'SAC':
                        algorithm = SACAlgorithm()
                    elif algo == 'Q':
                        algorithm = QLearnAlgorithm()
                    elif algo == 'DDPG':
                        algorithm = DDPGAlgorithm()
                    elif algo == 'test':
                        algorithm = TestApp()
                    flag = algorithm.explore(app, emulator, appium, timesteps,
                                             timer, save_policy, policy_dir,
                                             cycle)
                    if flag:
                        with open(f'logs{os.sep}success.log', 'a+') as f:
                            f.write(f'{app_name}\n')
                    else:
                        with open(f'logs{os.sep}error.log', 'a+') as f:
                            f.write(f'{app_name}\n')
                except Exception as e:
                    logger.error(e)
                    flag = False
                try:
                    # bye handler, it has been an honour
                    os.kill(app.bug_proc_pid, 9)
                except Exception:
                    pass
                if flag:
                    try:
                        app.reset()
                        app.driver.quit()
                    except InvalidSessionIdException:
                        pass
                    except WebDriverException:
                        pass
                    logger.remove(app.logger_id)
                    logger.remove(app.bug_logger_id)
                    # save_pickles(algo, app_name, cycle, clicked_buttons, visited_activities, number_bugs, bug_set)
                    logger.info(
                        f'app: {app_name}, test {cycle} of {N} ending\n')
                    cycle += 1
                else:
                    trial += 1
                    try:
                        logger.remove(app.logger_id)
                        logger.remove(app.bug_logger_id)
                    except Exception:
                        pass
                    if trial == max_trials:
                        try:
                            app.driver.quit()
                        except Exception:
                            pass
                        logger.error(
                            f'Too Many Times tried, app: {app_name}, iteration: {cycle}'
                        )
                        break
            # in order to avoid faulty behavior we uninstall the application
            if package:
                os.system(f'adb -s {udid} uninstall {package}')
    if emulator is not None:
        emulator.terminate()
    appium.terminate()
    return 0
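# Illustrative invocation (flags taken from the argparse definitions above; the
# script name and paths are assumptions):
#   python main.py --algo SAC --appium_port 4723 \
#       --apps apps/app1.apk,apps/app2.apk \
#       --timesteps 3600 --iterations 10 --instr_jacoco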
Пример #27
# NOTE: imports added for completeness; `logger` is assumed to be a loguru (or
# pre-configured logging) instance, as in the other examples.
import json
import time

import yaml
from loguru import logger


def parse_configs(file_name):
    with open(file_name, encoding='utf-8') as y:
        y_content = yaml.load(y, Loader=yaml.Loader)
    return y_content['addr'], y_content['port']


def get_msg(msg_in):
    json_in = json.loads(msg_in)
    return json_in


def send_msg(destination_host, msg):
    g = json.dumps(msg)
    destination_host.send(g.encode('utf-8'))


timestr = time.ctime(time.time()) + "\n"

logger.debug("get_addr_port(dedug)!")
logger.info("get_addr_port(dedug)!")
logger.error("get_addr_port(dedug)!")

logger.debug("parse_configs(dedug)!")
logger.info("parse_configs(dedug)!")
logger.error("parse_configs(dedug)!")

logger.debug("send_msg(dedug)!")
logger.info("send_msg(dedug)!")
logger.error("send_msg(dedug)!")
Пример #28
    def getDefaultBrowserByUserName(self, userName):
        output = self.volumeInfo

        if "FAT" or "NTFS" in output.split(" ")[0]:
            os.chdir("%s/%s/" % (self.mountDir, output.split(" ")[2]))
            logger.info(
                "Loading every user info!")  # TODO:It should be per user!
            try:
                os.chdir("Users/")
            except FileNotFoundError:
                logger.error("Couldn't find Users folder!")
                return None
            for userDir in os.listdir("."):
                if userName == userDir:
                    if os.access(
                            "{0}/AppData/Local/Microsoft/Windows/UsrClass.dat".
                            format(userDir), os.F_OK | os.R_OK):
                        # Open the UsrClass.dat hive whose readability was just checked;
                        # the http/https/.html associations live under Software\Classes.
                        registry = Registry.Registry(
                            "{0}/AppData/Local/Microsoft/Windows/UsrClass.dat".format(
                                userDir))
                    else:
                        logger.warning("Couldn't find user registry on %s" %
                                       userDir)
                        continue
                    try:
                        open1 = registry.open("http\\shell\\open\\command")
                    except Registry.RegistryKeyNotFoundException:
                        logger.error(
                            "Couldn't find UsrClass http registry on user {0}".
                            format(userDir))
                        continue
                    logger.info("Now showing %s default http open info!" %
                                userDir)
                    logger.debug("Default HTTP open handler : {0}".format(
                        open1.value("(default)").value()))
                    try:
                        open2 = registry.open("https\\shell\\open\\command")
                    except Registry.RegistryKeyNotFoundException:
                        logger.error(
                            "Couldn't find UsrClass https registry on user {0}"
                            .format(userDir))
                        continue
                    logger.info("Now showing %s default http open info!" %
                                userDir)
                    logger.debug("Default HTTPS open handler : {0}".format(
                        open2.value("(default)").value()))
                    try:
                        open3 = registry.open(".html")
                    except Registry.RegistryKeyNotFoundException:
                        logger.error(
                            "Couldn't find UsrClass .html registry on user {0}".
                            format(userDir))
                        continue
                    html_value = open3.value("(default)").value()
                    try:
                        open4 = registry.open(html_value)
                    except Registry.RegistryKeyNotFoundException:
                        logger.error(
                            ".html association on user {0} may be broken!".
                            format(userDir))
                        continue
                    logger.info("Now showing %s default .html open info!" %
                                userDir)
                    logger.debug("Default .html open handler : {0}".format(
                        open4.subkey("shell").subkey("open").subkey(
                            "command").value("(default)").value()))
Пример #29
    def _add_node(
        self,
        node,
        lib_name,
        args,
        target_node=None,
        recovery=False,
        ledger_dir=None,
        copy_ledger_read_only=False,
        read_only_ledger_dir=None,
        from_snapshot=False,
        snapshot_dir=None,
    ):
        forwarded_args = {
            arg: getattr(args, arg)
            for arg in infra.network.Network.node_args_to_forward
        }

        # Contact primary if no target node is set
        if target_node is None:
            target_node, _ = self.find_primary(
                timeout=args.ledger_recovery_timeout if recovery else 3
            )
        LOG.info(f"Joining from target node {target_node.node_id}")

        # Only retrieve snapshot from target node if the snapshot directory is not
        # specified
        if from_snapshot and snapshot_dir is None:
            snapshot_dir = self.get_committed_snapshots(target_node)
            assert os.listdir(
                snapshot_dir
            ), f"There are no snapshots to resume from in directory {snapshot_dir}"

        committed_ledger_dir = None
        current_ledger_dir = None
        if snapshot_dir is not None:
            LOG.info(f"Joining from snapshot directory: {snapshot_dir}")
            # Only when joining from snapshot, retrieve ledger dirs from target node
            # if the ledger directories are not specified. When joining without snapshot,
            # the entire ledger will be retransmitted by primary node
            current_ledger_dir = ledger_dir or None
            committed_ledger_dir = read_only_ledger_dir or None
            if copy_ledger_read_only and read_only_ledger_dir is None:
                current_ledger_dir, committed_ledger_dir = target_node.get_ledger(
                    include_read_only_dirs=True
                )

        node.join(
            lib_name=lib_name,
            workspace=args.workspace,
            label=args.label,
            common_dir=self.common_dir,
            target_rpc_address=f"{target_node.host}:{target_node.rpc_port}",
            snapshot_dir=snapshot_dir,
            ledger_dir=current_ledger_dir,
            read_only_ledger_dir=committed_ledger_dir,
            **forwarded_args,
        )

        # If the network is opening, nodes are trusted without consortium approval
        if self.status == ServiceStatus.OPENING:
            try:
                node.wait_for_node_to_join(timeout=JOIN_TIMEOUT)
            except TimeoutError:
                LOG.error(f"New node {node.node_id} failed to join the network")
                raise
            node.network_state = infra.node.NodeNetworkState.joined
Пример #30
        except Exception:
            progress.clear()
            logger.warning("Shibboleth rejected your username and/or password.")
            exit()

        rq = session.post(D2L_BASEURL + "/Shibboleth.sso/SAML2/POST", data=dta, allow_redirects=True)
        session.get(D2L_BASEURL + "/d2l/lp/auth/login/ProcessLoginActions.d2l")

    logger.info("Successfully logged into MyCourses")

    with halo.Halo(text="Discovering Courses", spinner="dots") as progress:

        # We need to get the XSRF.Token to move forward
        bph = session.get("{}/d2l/le/manageCourses/search/6605".format(D2L_BASEURL))
        if bph.status_code != 200:
            logger.error("Course Query failed. Invalid response code! Expected 200, got {}", bph.status_code)
            exit(1)

        token = get_xfrs_token(bph.text)
        now = datetime.datetime.now()
        query_data = {
            "gridPartialInfo$_type": "D2L.LP.Web.UI.Desktop.Controls.GridPartialArgs",
            "gridPartialInfo$SortingInfo$SortField": "OrgUnitName",
            "gridPartialInfo$SortingInfo$SortDirection": "0",
            "gridPartialInfo$NumericPagingInfo$PageNumber": "1",
            "gridPartialInfo$NumericPagingInfo$PageSize": "100",
            "searchTerm": "",
            "status": "-1",
            "toStartDate$Year": str(now.year),
            "toStartDate$Month": str(now.month),
            "toStartDate$Day": str(now.day),
Пример #31
    def start_and_join(self, args):
        """
        Starts a CCF network.
        :param args: command line arguments to configure the CCF nodes.
        """
        self.common_dir = get_common_folder_name(args.workspace, args.label)

        assert (
            args.gov_script is not None
        ), "--gov-script argument must be provided to start a network"

        self._setup_common_folder(args.gov_script)

        mc = max(1, args.initial_member_count)
        initial_members_info = []
        for i in range(mc):
            initial_members_info += [
                (
                    i,
                    (i < args.initial_recovery_member_count),
                    {"is_operator": True}
                    if (i < args.initial_operator_count)
                    else None,
                )
            ]

        self.consortium = infra.consortium.Consortium(
            self.common_dir,
            self.key_generator,
            self.share_script,
            initial_members_info,
            args.participants_curve,
        )
        initial_users = list(range(max(0, args.initial_user_count)))
        self.create_users(initial_users, args.participants_curve)

        primary = self._start_all_nodes(args)
        self.wait_for_all_nodes_to_catch_up(primary)
        LOG.success("All nodes joined network")

        self.consortium.activate(primary)

        if args.js_app_script:
            LOG.error(
                "--js-app-script is deprecated - update to --js-app-bundle instead"
            )
            infra.proc.ccall(
                "cp", args.js_app_script, args.binary_dir
            ).check_returncode()
            self.consortium.set_js_app(
                remote_node=primary, app_script_path=args.js_app_script
            )

        if args.js_app_bundle:
            self.consortium.deploy_js_app(
                remote_node=primary, app_bundle_path=args.js_app_bundle
            )

        for path in args.jwt_issuer:
            self.consortium.set_jwt_issuer(remote_node=primary, json_path=path)

        self.consortium.add_users(primary, initial_users)
        LOG.info(f"Initial set of users added: {len(initial_users)}")

        self.consortium.open_network(remote_node=primary)
        self.status = ServiceStatus.OPEN
        LOG.success("***** Network is now open *****")
Пример #32
                img_file = f'frame_{counter}.jpg'
                cv2.imwrite(img_file, frame)
                response = f'frame captured from {output["src"]} and saved to {img_file}'
            else:
                response = queue_img(data=output,
                                     queue_url=output['queue_url'])

            logger.info(response)
            time.sleep(data['sleep'])
            counter += 1
        except AttributeError:
            # re-establish video capture if 'NoneType' error occurs because the connection has been dropped.
            logger.error(
                f"re-establishing video connection with {data['src']}")
            cap = make_capture_obj(data['src'])
        except Exception:
            logger.error(traceback.format_exc())
            continue


if __name__ == '__main__':
    """entry point"""
    try:
        data = json.loads(os.getenv('data'))
        logger.info(f'received data: {data}')
    except TypeError as e:
        logger.error('no environmental variables available')
        raise

    stream_feed(data)
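# Illustrative `data` environment variable consumed above (JSON-encoded). Only
# the keys visible in this fragment ('src', 'sleep') are shown; the original
# may expect more fields (e.g. a queue_url):
#   export data='{"src": "rtsp://camera.example/stream", "sleep": 2}'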
Пример #33
    def forward(self, inputs: torch.Tensor, mode: bittensor.proto.Modality) -> Tuple[torch.Tensor, int]:
        r""" Torch.nn.Module forward call: Triggers the grpc call to the remote neuron on the associated endpoint.
            Call returns the output tensor and a bittensor.proto.ReturnCode.

            Args:
                inputs (:obj:`List[torch.Tensor]` of shape :obj:`(shape)`, `required`):
                    Single torch tensor to be sent to the remote neuron endpoint.

                mode (:obj:`bittensor.proto.Modality` of shape :obj:`(1)`, `required`):
                    Bittensor forward modality type. Enum in [TEXT, IMAGE, TENSOR]

            Returns:
                output (:obj:`Tuple[torch.FloatTensor, torch.LongTensor]`, `required`):
                    Result tuple from the forward call.

        """
        # ---- On Backoff: We dont make an RPC and return zeros instead ----  
        if self.config.receptor.do_backoff and self.backoff >= 1:
            outputs = nill_response_for(inputs)
            code = torch.tensor(bittensor.proto.ReturnCode.Backoff)

        # ---- On Not-backoff: We make the Forward RPC ---- 
        else:
            try:
                # Make and time the query.
                outputs, code = _ReceptorCall.apply(self, DUMMY, inputs, mode)

            # ---- On unknown failure: we return zeros and unknown code ---- 
            except Exception as e:
                logger.error('Uncaught error in forward call with error {}, {}'.format( e, traceback.format_exc()))
                outputs = nill_response_for(inputs)
                code = torch.tensor(bittensor.proto.ReturnCode.UnknownException)

        # ---- On Success: set zero backoff and halve the next backoff ---- 
        try:
            self.stats.codes[code.item()] += 1
        except Exception: 
            pass
        if code.item() == bittensor.proto.ReturnCode.Success:
            self.backoff = 0
            self.next_backoff = max(1, self.next_backoff / 2)

        elif code.item() == bittensor.proto.ReturnCode.EmptyRequest:
            # This was a NO-OP
            pass
            
        # ---- On Backoff: Lower backoff value by 1 ---- 
        elif code.item() == bittensor.proto.ReturnCode.Backoff:
            # We slowly lower the backoff count until 0.
            self.backoff -= 1

        # ---- On failure: Increase backoff and double next_backoff towards max value ---- 
        # Catch all non-success / non-backoff codes and trigger backoff increase. This catches
        # serialization errors, timeouts, unavailable endpoints etc. Note, it can 
        # be triggered by invalid requests on this side of the query.
        else:
            # ---- Do backoff: incease backoff until max_backoff is reached ---- 
            self.backoff = self.next_backoff
            self.next_backoff = min(self.config.receptor.max_backoff, self.next_backoff * 2)

        # ---- Finally return ---- 
        return outputs, code