Example #1
class HaloFeedback(Feedback):
    def __init__(self, message=None):
        self.spinner = Halo(text=message or '', spinner='dots')
        if message and not default_config.debug:
            self.spinner.start()

    def update_message(self, message):
        super().update_message(message)
        # Halo exposes no public "is running" flag; _spinner_id is set while
        # the spinner thread is active.
        if not self.spinner._spinner_id and not default_config.debug:
            self.spinner.start()
        self.spinner.text = f'{message} ...' if message else ''

    def succeeded(self):
        self.spinner.text = self.message
        self.spinner.succeed()

    def errored(self, error):
        # self.spinner.text = str(error) if error else self.message
        self.spinner.text = f'{self.message} ... {colored(str(error), "red")}'
        self.spinner.fail()
        sys.exit(1)

    def warning(self, warning):
        self.spinner.text = f'{self.message} ... {colored(str(warning), "yellow")}'
        self.spinner.warn()

    def info(self, message):
        self.spinner.info(message)

    def exists(self, error):
        self.warning('exists')
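
The wrapper above leans on Halo's basic lifecycle: start() begins animating, the text attribute can be rewritten while the spinner runs, and succeed()/fail()/warn()/info() stop the spinner and persist a status symbol plus message. A minimal stand-alone sketch of that lifecycle (messages are illustrative only):

import time
from halo import Halo

spinner = Halo(text='Loading data', spinner='dots')
spinner.start()                  # begin animating
time.sleep(1)
spinner.text = 'Still loading'   # text can be swapped mid-spin
time.sleep(1)
spinner.succeed('Done')          # stop and persist a success symbol with text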
Example #2
class StatusUpdate(object):
    def __init__(self, title):
        if sys.stdout.isatty():
            self.spinner = Halo(title, spinner='dots')
            self.spinner.start()
        else:
            self.spinner = None
            print(title)

    def complete(self, success, message):
        if self.spinner:
            if success is True:
                self.spinner.succeed(message)
            elif success is False:
                self.spinner.fail(message)
            else:
                self.spinner.warn(message)
            self.spinner.stop()
        else:
            if success is True:
                print("success: ", message)
            elif success is False:
                print("failed: ", message)
            else:
                print("warning: ", message)
Example #3
    def get_access(self):
        """
        Get Upvoted, Downvoted, Gildings, Hidden, and Saved Redditor posts. These
        lists tend to raise a 403 HTTP Forbidden exception, so naturally exception
        handling is necessary. 
        
        Calls previously defined private method:

            self._extract()
        """

        access_interactions = [
            self._downvoted, self._gildings, self._hidden, self._saved,
            self._upvoted
        ]
        access_names = ["downvoted", "gildings", "hidden", "saved", "upvoted"]

        access_halo = Halo(
            color="white",
            text="Extracting Upvoted, Downvoted, Gildings, Hidden, and Saved interactions."
        )

        access_halo.start()
        for category, interaction in zip(access_names, access_interactions):
            try:
                self._extract(interaction, category)
            except PrawcoreException as error:
                access_halo.warn(
                    Fore.YELLOW +
                    "Access to %s interactions forbidden: %s. SKIPPING." %
                    (category.capitalize(), error))
                self._skeleton["data"]["interactions"]["%s" % category].append(
                    "FORBIDDEN")
Example #4
File: vicl.py  Project: quebin31/icl-vae
    def __init__(self, rho: float, vae_layers, vgg_weights: str = 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth'):
        """Build the main model containing both the feature extractor and the 
        variational autoencoder.

        Args:
            rho (float): rho value.
            vae_layers: layer specification forwarded to the Vae module.
            vgg_weights (str, optional): vgg weights. Defaults to 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth'.
        """

        super(Vicl, self).__init__()

        self.rho = rho
        self.extractor = Vgg19()
        self.vae = Vae(layers=vae_layers)
        self.reg_params = {}
        self.class_idents = {}

        # Load pretrained weights for the VGG
        halo = Halo(text='Downloading VGG19 saved state', spinner='dots')
        vgg19_state_dict = load_state_dict_from_url(
            vgg_weights, progress=False)
        missing, unexpected = self.extractor.load_state_dict(
            vgg19_state_dict, strict=False
        )

        if not empty(missing):
            halo.warn(f'There are missing keys in the VGG model ({missing})')

        if not empty(unexpected):
            halo.warn(
                f'There are unexpected keys in the VGG model ({unexpected})')
Example #5
def update(**kwargs):
    """Modify user account details."""
    spinner = Halo(text="Updating user information...", spinner="dots").start()
    if not any(kwargs.values()):
        spinner.warn("No values to update were supplied.")
        return
    try:
        api.update_user(kwargs)
        spinner.succeed("Account successfully updated.")
    except Exception:
        spinner.fail("Unable to update account information.")
Example #6
    def test_warning(self):
        """Test warn method
        """
        spinner = Halo(stream=self._stream)
        spinner.start('foo')
        spinner.warn('Warning!')

        output = self._get_test_output()['text']
        pattern = re.compile(r'(⚠|!!) Warning!', re.UNICODE)

        self.assertRegex(output[-1], pattern)
        spinner.stop()
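
The same capture technique can be sketched outside a unittest class: Halo writes to any writable file-like object passed as stream, which is what makes the assertion above possible. A hedged sketch, assuming only the halo package:

import io
from halo import Halo

stream = io.StringIO()
spinner = Halo(text='foo', spinner='dots', stream=stream)
spinner.start()
spinner.warn('Warning!')   # persists the warning symbol and text to the stream
spinner.stop()
assert 'Warning!' in stream.getvalue()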
Example #7
def me():
    """Display information of current user."""
    spinner = Halo(text="Fetching current user's information...",
                   spinner="dots").start()
    if "authorization" not in config:
        spinner.warn("Not logged into Unsplash. Please log in first.")
        return
    try:
        user = api.me()
        spinner.succeed("Information for user @%s:" % user["username"])
        click.echo("")
        pretty_dict(user)
    except Exception:
        spinner.fail("Unable to fetch user information.")
Example #8
File: cgit.py  Project: justnat3/GitSock
def isSpinning(spinnerState):
    spinnerState_inside = None
    _spinner = Halo(text='Waiting for Git to come online', spinner='dots')
    if spinnerState == State.SPINNER_DEAD:
        _spinner.start()
    while spinnerState == State.SPINNER_ALIVE:
        if spinnerState_inside == State.SPINNER_STOP:
            _spinner.stop()
        if spinnerState_inside == State.SPINNER_SUCCEED:
            _spinner.succeed(text=SPINNER_PERIST_DATA_SUCCEED)
        if spinnerState_inside == State.SPINNER_WARNING:
            _spinner.warn(text=SPINNER_PERIST_DATA_WARNING)
        if spinnerState_inside == State.SPINNER_FAIL:
            _spinner.fail(text=SPINNER_PERIST_DATA_FAIL)
            exit(1)
        if spinnerState_inside == State.SPINNER_SUCCEED_SOCK:
            _spinner.succeed(text='is this getting here')
        if spinnerState_inside == State.SPINNER_FAIL_SOCK:
            _spinner.fail(text=SPINNER_PERIST_DATA_FAIL_SOCK)
            exit(1)
        time.sleep(.5)  # throttle the polling loop
Example #9
def sync(file_format, catalog_path):
    catalog_path = catalog_path or settings.config["catalog_path"]
    file_format = file_format or settings.config["format"]
    if not os.path.isdir(catalog_path):
        os.makedirs(catalog_path)
    skip = 0
    limit = 10
    releases = api.releases(skip=skip, limit=limit)
    while len(releases["results"]) > 0:
        for release in releases["results"]:
            name = f"{release['catalogId'] or release['artistsTitle']} - {release['title']}"
            filename = f"{name}.zip"
            download_link = api.generate_release_link(release["id"],
                                                      file_format)
            outfile = os.path.join(catalog_path, filename)
            spinner = Halo(text=f"[Downloading - 0%] {name}")

            if release["inEarlyAccess"]:
                spinner.warn(f"[Skipping: Early Access] {name}")
                continue
            elif os.path.exists(outfile):
                spinner.succeed(f"[Already Exists] {name}")
                continue

            spinner.start()
            try:
                api.download_file(
                    download_link,
                    outfile,
                    callback=(lambda p: progress_callback(p, spinner, name)))
                spinner.succeed(f"[Downloaded] {name}")
            except Exception as e:
                spinner.fail(f"[Failed] {name}\n{e}")
        skip += limit
        releases = api.releases(skip=skip, limit=limit)
        time.sleep(5)
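
The snippet assumes a progress_callback helper defined elsewhere in the project. Judging from the lambda and the "[Downloading - 0%]" spinner text above, a minimal hypothetical version could be:

def progress_callback(percent, spinner, name):
    # Rewrite the spinner line in place as download progress arrives.
    # Signature and percent format are inferred, not the project's actual code.
    spinner.text = f"[Downloading - {percent:.0f}%] {name}"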
Example #10
File: utils.py  Project: jpza/ekscli
class ResourceReporter:
    def __init__(self):
        self.spinner = Halo(text='', spinner='dots')

    def progress(self, resource):
        self.spinner.start(text='[ {0: <10}] {1}'.format(
            resource.status, resource.description))

    def succeed(self, resource):
        self.spinner.succeed(text='[ {0: <10}] {1} [{2}].'.format(
            resource.status, resource.description, resource.resource_id))

    def fail(self, resource):
        self.spinner.fail(text='[ {0: <10}] {1}'.format(
            resource.status, resource.description))

    def warn(self, text):
        self.spinner.warn(text=text)

    def info(self, resource):
        self.spinner.info(text='[ {} ] {} [{}].'.format(
            resource.status, resource.description, resource.resource_id))

    def report_stack_creation(self, name, resources, stack_id):
        cf = boto3.session.Session().resource('cloudformation')
        stack = cf.Stack(stack_id)
        rmap = {r.name: r for r in resources}
        completed = set()
        for r in resources:
            if r.status == Status.provided or r.status == Status.created:
                self.succeed(r)
                completed.add(r.name)

        current = None
        while stack.stack_status in [
                'CREATE_IN_PROGRESS', 'ROLLBACK_IN_PROGRESS'
        ]:
            states = {
                rs.logical_resource_id: rs
                for rs in stack.resource_summaries.all()
            }
            if current:
                rs = states.get(current)
                r = rmap.get(current)
                if rs.resource_status in ['CREATE_IN_PROGRESS']:
                    time.sleep(2)
                    continue
                else:
                    self.report_completed_resource(completed, r, rs,
                                                   ['CREATE_COMPLETE'],
                                                   Status.created)
                    current = None

            for name, rs in iteritems(states):
                r = rmap.get(name)
                if name not in completed and r:
                    if rs.resource_status in ['CREATE_IN_PROGRESS']:
                        current = name
                        r.status = Status.creating
                        self.progress(r)
                        break

                    self.report_completed_resource(completed, r, rs,
                                                   ['CREATE_COMPLETE'],
                                                   Status.created)
            time.sleep(2)
            stack = cf.Stack(stack_id)

        states = {
            rs.logical_resource_id: rs
            for rs in stack.resource_summaries.all()
        }
        if current:
            r = rmap.get(current)
            rs = states.get(current)
            self.report_completed_resource(completed, r, rs,
                                           ['CREATE_COMPLETE'], Status.created)

        for name, rs in iteritems(states):
            r = rmap.get(name)
            if name not in completed and r:
                self.report_completed_resource(completed, r, rs,
                                               ['CREATE_COMPLETE'],
                                               Status.created)

        if stack.stack_status in [
                'CREATE_FAILED', 'ROLLBACK_IN_PROGRESS', 'ROLLBACK_FAILED',
                'ROLLBACK_COMPLETE'
        ]:
            raise EKSCliException('Failed to create EKS cluster {}: {}'.format(
                name, stack.stack_status))

        return stack

    def report_completed_resource(self, completed, resource, resource_summary,
                                  success_states, success):
        if resource_summary.resource_status in success_states:
            resource.status = success
            resource.resource_id = resource_summary.physical_resource_id
            self.succeed(resource)
        else:
            resource.status = Status.failed
            self.fail(resource)
        completed.add(resource.name)

    def report_stack_deletion(self, name, resources, stack_id):
        cf = boto3.session.Session().resource('cloudformation')
        stack = cf.Stack(stack_id)
        rmap = {r.name: r for r in resources}
        completed = set()
        for r in resources:
            if r.status == Status.not_exist or r.status == Status.deleted or r.status == Status.provided:
                self.succeed(r)
                completed.add(r.name)

        current = None
        while stack.stack_status not in ['DELETE_COMPLETE', 'DELETE_FAILED']:
            states = {
                rs.logical_resource_id: rs
                for rs in stack.resource_summaries.all()
            }
            if current:
                rs = states.get(current)
                r = rmap.get(current)
                if rs.resource_status in [
                        'DELETE_IN_PROGRESS', 'CREATE_COMPLETE',
                        'UPDATE_COMPLETE'
                ]:
                    time.sleep(2)
                    continue
                else:
                    self.report_completed_resource(completed, r, rs,
                                                   ['DELETE_COMPLETE'],
                                                   Status.deleted)
                    current = None

            for name, rs in iteritems(states):
                r = rmap.get(name)
                if name not in completed and r:
                    if rs.resource_status in [
                            'DELETE_IN_PROGRESS', 'CREATE_COMPLETE',
                            'UPDATE_COMPLETE'
                    ]:
                        current = name
                        r.status = Status.deleting
                        self.progress(r)
                        break

                    self.report_completed_resource(completed, r, rs,
                                                   ['DELETE_COMPLETE'],
                                                   Status.deleted)
            time.sleep(2)
            stack = cf.Stack(stack_id)

        states = {
            rs.logical_resource_id: rs
            for rs in stack.resource_summaries.all()
        }
        if current:
            r = rmap.get(current)
            rs = states.get(current)
            self.report_completed_resource(completed, r, rs,
                                           ['DELETE_COMPLETE'], Status.deleted)

        for name, rs in iteritems(states):
            r = rmap.get(name)
            if name not in completed and r:
                self.report_completed_resource(completed, r, rs,
                                               ['DELETE_COMPLETE'],
                                               Status.deleted)

        if stack.stack_status in ['DELETE_FAILED']:
            raise EKSCliException('Failed to delete EKS cluster {}: {}'.format(
                name, stack.stack_status))

        return
Example #11
def announce_grade(
    homework_prefix: str = typer.Argument(
        ..., help="prefix of the target homework"),
    feedback_source_repo: Optional[str] = typer.Option(
        None, show_default=True, help="Repo contains students' feedbacks"),
    only_id: Optional[str] = typer.Option(default=None,
                                          help="only id to announce"),
    token: Optional[str] = opt_github_token,
    org: str = opt_gh_org,
    dry: bool = typer.Option(
        False, "--dry", help="dry run, do not publish result to the remote"),
    yes: bool = opt_all_yes,
):
    """Announce student grades to each hw repo"""
    ensure_config_exists()

    def fallback(val, fallback_value):
        return val if val else fallback_value

    # Handle default value manually because we'll change our config after app starts up
    token: str = fallback(token,
                          app_context.config.github.personal_access_token)
    org: str = fallback(org, app_context.config.github.organization)
    feedback_source_repo: str = fallback(
        feedback_source_repo,
        app_context.config.announce_grade.feedback_source_repo)

    ensure_gh_token(token)
    if not (yes or
            typer.confirm(f"Add annouce_grade to {org}/{homework_prefix}?")):
        raise typer.Abort()

    # TODO: use logging lib to log messages
    spinner = Halo(stream=sys.stderr)

    student_feedback_title = f"Grade for {homework_prefix}"

    gstudents = Gstudents()
    feedback_vars = gstudents.left_join(homework_prefix)

    # Clone feedback repo & set needed variables
    cur = Path(".")

    for d in cur.glob("feedback-tmp-*"):
        shutil.rmtree(d)
    spinner.info("delete dated folder")

    root_folder = Path(
        tempfile.mkdtemp(
            prefix="feedback-tmp-{}-".format(
                datetime.now().strftime("%b%d%H%M%S")),
            dir=".",
        ))
    spinner.succeed(f"Create tmp folder {root_folder}")

    feedback_repo_path = root_folder / "feedbacks"

    spinner.info(f"cloning feeback source repo : {feedback_source_repo}")
    _, t = measure_time(sp.run)(
        [
            "git",
            "clone",
            f"https://github.com/{org}/{feedback_source_repo}.git",
            feedback_repo_path.name,
        ],
        cwd=root_folder,
    )
    spinner.succeed(
        f"cloning feedback source repo : {feedback_source_repo} ... {t:4.2f} sec"
    )
    client = httpx.AsyncClient(headers=httpx.Headers(
        {
            "User-Agent": "GitHubClassroomUtils/1.0",
            "Authorization": "token " + token,
            # needed for the check-suites request
            "Accept": "application/vnd.github.antiope-preview+json",
        }))

    hw_path = feedback_repo_path / homework_prefix / "reports"

    # generate feedbacks
    fbs, t = measure_time(gen_feedbacks)(homework_prefix, hw_path,
                                         feedback_vars)
    spinner.succeed(f"Generate content for feedbacks ... {t:5.3f} sec")

    # handle only_id
    if only_id:
        try:
            # detect possible buggy condition
            info = gstudents.get_student(only_id)
        except RuntimeError as e:
            print(" *=" * 30)
            print("Warning!")
            print(e)
            return
        only_repo_name = get_hw_repo_name(homework_prefix,
                                          info["github_handle"])
        fbs = list(filter(lambda fb: fb["repo_name"] == only_repo_name, fbs))

    async def push_to_remote(feedback_title, feedbacks):
        # push to remote
        async def push_feedback(fb):
            request_body = {"title": feedback_title, "body": fb["value"]}
            try:
                issue_num = await find_existing_issue(client, org,
                                                      fb["repo_name"],
                                                      feedback_title)
            except BaseException:
                print(f'error on {fb["repo_name"]}')
                return
            if issue_num:
                request_body["state"] = "open"  # reopen issue
                url = f"https://api.github.com/repos/{org}/{fb['repo_name']}/issues/{issue_num}"
                await edit_issue_async(client, url, issue_num, request_body)
            else:
                url = f"https://api.github.com/repos/{org}/{fb['repo_name']}/issues"
                await create_issue_async(client, url, request_body)
            print(f'success {fb["repo_name"]}')

        async with trio.open_nursery() as nursery:
            for fb in feedbacks:
                nursery.start_soon(push_feedback, fb)

    # print out target repos
    print("repo to announce grade:")
    pprint([fb["repo_name"] for fb in fbs])

    if dry:
        spinner.succeed("DRYRUN: skip push to remote")
    else:
        if typer.confirm("Do you want to continue?", default=False):
            _, t = measure_time(trio.run)(push_to_remote,
                                          student_feedback_title, fbs)
            spinner.succeed(f"Push feedbacks to remote ... {t:5.2f} sec")
        else:
            spinner.warn("You refused to publish to remote")

    spinner.succeed("finished announce grade")
    return
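
measure_time is a project helper; judging from its call sites above (result, elapsed = measure_time(fn)(*args)), a minimal hypothetical equivalent would be:

import time

def measure_time(fn):
    # Wrap fn so a call returns (result, elapsed_seconds). Inferred from usage;
    # not the project's actual implementation.
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        result = fn(*args, **kwargs)
        return result, time.perf_counter() - start
    return wrapper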
Example #12
def filterScrape(need, category, page):

    spinner = Halo(text='Scraping content', spinner='dots', animation='bounce')
    headers = {
        'User-Agent':
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'
    }
    output_dic = OrderedDict()
    found = 0

    try:
        while (found < need):
            spinner.start()
            url = "https://ctftime.org/writeups?page={}&hidden-tags={}".format(
                page, category)
            spinner.text = "Scraping Page: {}".format(page)
            response = requests.get(url, headers=headers)
            soup = BeautifulSoup(response.content, 'html.parser')
            count_per_page = 0
            for tr in soup.find_all('tr')[1:]:
                tds = tr.find_all('td')
                w_no = tds[4].a["href"]
                task_name = tds[1].text
                writeup_url = "https://ctftime.org/" + w_no
                r = requests.get(writeup_url, headers=headers)
                spinner.text = "Parsing {} ({})".format(
                    w_no,
                    task_name.encode('ascii', 'ignore').decode('ascii'))
                spinner.color = "red"

                if (len(task_name) > 30):
                    task_name = task_name[:27] + '...'

                flag = 0
                original_url = ""
                new_soup = BeautifulSoup(r.content, 'lxml')
                a = new_soup.find_all('a')

                for link in a:
                    if link.text == "Original writeup":
                        original_url = link['href']
                        if (len(original_url) <= 125):
                            flag = 1
                            break
                if flag == 1:
                    if (task_name in output_dic):
                        output_dic[task_name] += '\n' + original_url
                    else:
                        output_dic[task_name] = original_url
                        count_per_page += 1
                        found += 1
                else:
                    if task_name not in output_dic:
                        count_per_page += 1
                        found += 1
                    output_dic[task_name] = writeup_url

                if (found == need):
                    break
                else:
                    continue

            if (count_per_page == 0):
                spinner.fail("Page {} doesn't exist.".format(page))
                spinner.info("Try decreasing the Page Seed or limit")
                spinner.info("Try changing the category")
                print(
                    "Such as : Change 'rev' -> 'reverse engineering' to get more results"
                )
                break
            else:
                spinner.succeed(
                    "Gathered writeups for {} tasks from page {}".format(
                        count_per_page, page))
                spinner.color = "cyan"
                page += 1

        return output_dic

    except (KeyboardInterrupt, SystemExit):
        spinner.warn('Program exited unexpectedly')
        exit()
Example #13
def announce_grade(homework_prefix, token, dry, org, only_id,
                   feedback_source_repo):
    """announce student grades to each hw repo"""

    ensure_gh_token(token)
    # TODO: use logging lib to log messages
    spinner = Halo(stream=sys.stderr)

    student_feedback_title = f"Grade for {homework_prefix}"

    gstudents = Gstudents()
    feedback_vars = gstudents.left_join(homework_prefix)

    # Clone feedback repo & set needed variables
    cur = Path(".")

    for d in cur.glob("feedback-tmp-*"):
        shutil.rmtree(d)
    spinner.info("delete dated folder")

    root_folder = Path(
        tempfile.mkdtemp(
            prefix="feedback-tmp-{}-".format(
                datetime.now().strftime("%b%d%H%M%S")),
            dir=".",
        ))
    spinner.succeed(normal.txt("Create tmp folder ").kw(root_folder).to_str())

    feedback_repo_path = root_folder / "feedbacks"

    spinner.info(f"cloning feeback source repo : {feedback_source_repo}")
    _, t = measure_time(sp.run)(
        [
            "git",
            "clone",
            f"https://github.com/{org}/{feedback_source_repo}.git",
            feedback_repo_path.name,
        ],
        cwd=root_folder,
    )
    spinner.succeed(
        f"cloning feedback source repo : {feedback_source_repo} ... {t:4.2f} sec"
    )
    client = httpx.AsyncClient(headers=httpx.Headers(
        {
            "User-Agent": "GitHubClassroomUtils/1.0",
            "Authorization": "token " + token,
            # needed for the check-suites request
            "Accept": "application/vnd.github.antiope-preview+json",
        }))

    hw_path = feedback_repo_path / homework_prefix / "reports"

    # generate feedbacks
    fbs, t = measure_time(gen_feedbacks)(homework_prefix, hw_path,
                                         feedback_vars)
    spinner.succeed(f"Generate content for feedbacks ... {t:5.3f} sec")

    # handle only_id
    if only_id:
        try:
            # detect possible buggy condition
            info = gstudents.get_student(only_id)
        except RuntimeError as e:
            print(" *=" * 30)
            print("Warning!")
            print(e)
            return
        only_repo_name = get_hw_repo_name(homework_prefix,
                                          info["github_handle"])
        fbs = list(filter(lambda fb: fb["repo_name"] == only_repo_name, fbs))

    async def push_to_remote(feedback_title, feedbacks):
        # push to remote
        async def push_feedback(fb):
            request_body = {"title": feedback_title, "body": fb["value"]}
            try:
                issue_num = await find_existing_issue(client, org,
                                                      fb["repo_name"],
                                                      feedback_title)
            except BaseException:
                print(f'error on {fb["repo_name"]}')
                return
            if issue_num:
                request_body["state"] = "open"  # reopen issue
                url = f"https://api.github.com/repos/{org}/{fb['repo_name']}/issues/{issue_num}"
                await edit_issue_async(client, url, issue_num, request_body)
            else:
                url = f"https://api.github.com/repos/{org}/{fb['repo_name']}/issues"
                await create_issue_async(client, url, request_body)
            print(f'success {fb["repo_name"]}')

        async with trio.open_nursery() as nursery:
            for fb in feedbacks:
                nursery.start_soon(push_feedback, fb)

    # print out target repos
    print("repo to announce grade:")
    pprint([fb["repo_name"] for fb in fbs])

    if dry:
        spinner.succeed("DRYRUN: skip push to remote")
    else:
        if click.confirm("Do you want to continue?", default=False):
            _, t = measure_time(trio.run)(push_to_remote,
                                          student_feedback_title, fbs)
            spinner.succeed(f"Push feedbacks to remote ... {t:5.2f} sec")
        else:
            spinner.warn("You refused to publish to remote")

    spinner.succeed("finished announce grade")
    return
Example #14
File: remotes.py  Project: arbal/ConsolePi
class Remotes:
    """Remotes Object Contains attributes for discovered remote ConsolePis"""
    def __init__(self, local, cpiexec):
        self.cpiexec = cpiexec
        self.pop_list = []
        self.old_api_log_sent = False
        self.log_sym_warn = log_sym.WARNING.value
        self.log_sym_error = log_sym.ERROR.value
        self.local = local
        self.connected = False
        self.cache_update_pending = False
        self.spin = Halo(spinner="dots")
        self.cloud = None  # Set in refresh method if reachable
        self.do_cloud = config.cfg.get("cloud", False)
        CLOUD_CREDS_FILE = config.static.get("CLOUD_CREDS_FILE")
        if not CLOUD_CREDS_FILE:
            self.no_creds_error()
        if self.do_cloud and config.cloud_svc == "gdrive":
            if utils.is_reachable("www.googleapis.com", 443):
                self.local_only = False
                if not utils.valid_file(CLOUD_CREDS_FILE):
                    self.no_creds_error()
            else:
                log.warning(
                    f"failed to connect to {config.cloud_svc} - operating in local only mode",
                    show=True,
                )
                self.local_only = True
        self.data = self.get_remote(data=config.remote_update(
        ))  # re-get cloud.json to capture any updates via mdns

    def no_creds_error(self):
        cloud_svc = config.cfg.get("cloud_svc", "UNDEFINED!")
        log.warning(
            f"Required {cloud_svc} credentials files are missing refer to GitHub for details"
        )
        log.warning(f"Disabling {cloud_svc} updates")
        log.show("Cloud Function Disabled by script - No Credentials Found")
        self.do_cloud = config.cfg["do_cloud"] = False

    # Get remote consoles from the local cache; refresh() will check/update the cloud file and update the local cache
    def get_remote(self, data=None, rename=False):
        spin = self.spin

        def verify_remote_thread(remotepi, data, rename):
            """sub to verify reachability and api data for remotes

            params:
            remotepi: The hostname currently being processed
            data: dict remote ConsolePi dict with hostname as key
            """
            this = data[remotepi]
            res = self.api_reachable(remotepi, this, rename=rename)
            this = res.data
            if res.update:
                self.cache_update_pending = True

            if not res.reachable:
                log.warning(
                    f"[GET REM] Found {remotepi} in Local Cloud Cache: UNREACHABLE"
                )
                this["fail_cnt"] = (1 if not this.get("fail_cnt") else
                                    this["fail_cnt"] + 1)
                self.pop_list.append(remotepi)
                self.cache_update_pending = True
            else:
                self.connected = True
                if this.get("fail_cnt"):
                    this["fail_cnt"] = 0
                    self.cache_update_pending = True
                if res.update:
                    log.info(
                        f"[GET REM] Updating Cache - Found {remotepi} in Local Cloud Cache, "
                        f"reachable via {this['rem_ip']}")

            data[remotepi] = this

        if data is None or len(data) == 0:
            data = config.remotes  # remotes from local cloud cache

        if not data:
            # print(self.log_sym_warn + " No Remotes in Local Cache")
            log.info("No Remotes found in Local Cache")
            data = {}  # convert None type to empty dict
        else:
            # if self is in the remote-data remove and warn user (can occur in rare scenarios i.e. hostname changes)
            if socket.gethostname() in data:
                del data[socket.gethostname()]
                log.show(
                    "Local cache included entry for self - do you have other ConsolePis using the same hostname?"
                )

            # Verify Remote ConsolePi details and reachability
            if stdin.isatty():
                spin.start(
                    "Querying Remotes via API to verify reachability and adapter data"
                )
            for remotepi in data:
                # -- // Launch Threads to verify all remotes in parallel \\ --
                threading.Thread(
                    target=verify_remote_thread,
                    args=(remotepi, data, rename),
                    name=f"vrfy_{remotepi}",
                ).start()
                # verify_remote_thread(remotepi, data)  # Non-Threading DEBUG

            # -- wait for threads to complete --
            if not self.cpiexec.wait_for_threads(name="vrfy_",
                                                 thread_type="remote"):
                if config.remotes:
                    if stdin.isatty():
                        spin.succeed(
                            "[GET REM] Querying Remotes via API to verify reachability and adapter data\n\t"
                            f"Found {len(config.remotes)} Remote ConsolePis")
                else:
                    if stdin.isatty():
                        spin.warn(
                            "[GET REM] Querying Remotes via API to verify reachability and adapter data\n\t"
                            "No Reachable Remote ConsolePis Discovered")
            else:
                log.error(
                    "[GET REM] Remote verify threads Still running / exceeded timeout"
                )
                if stdin.isatty():
                    spin.stop()

        # update local cache if any ConsolePis found UnReachable
        if self.cache_update_pending:
            if self.pop_list:
                for remotepi in self.pop_list:
                    if (
                            data[remotepi]["fail_cnt"] >= 3
                    ):  # NoQA remove from local cache after 3 failures (cloud or mdns will repopulate if discovered)
                        removed = data.pop(remotepi)
                        log.warning(
                            "[GET REM] {} has been removed from Local Cache after {} failed attempts"
                            .format(remotepi, removed["fail_cnt"]),
                            show=True,
                        )
                    else:
                        log.show("Cached Remote '{}' is unreachable".format(
                            remotepi))

            # update local cache file if rem_ip or adapter data changed
            data = self.update_local_cloud_file(data)
            self.pop_list = []
            self.cache_update_pending = False

        return data

    # Update with Data from ConsolePi.csv on Gdrive and local cache populated by mdns.  Update Gdrive with our data
    def refresh(self, bypass_cloud=False):
        remote_consoles = None
        cpiexec = self.cpiexec
        local = self.local
        cloud_svc = config.cfg.get("cloud_svc", "error")

        # TODO refactor wait_for_threads to have an all key or accept a list
        with Halo(text="Waiting For threads to complete", spinner="dots1"):
            if cpiexec.wait_for_threads(thread_type="remotes") and (
                    config.power
                    and cpiexec.wait_for_threads(name="_toggle_refresh")):
                log.show(
                    "Timeout Waiting for init or toggle threads to complete try again later or"
                    " investigate logs")
                return

        # -- // Update/Refresh Local Data (Adapters/Interfaces) \\ --
        local.data = local.build_local_dict(refresh=True)
        log.debugv(
            f"Final Data set collected for {local.hostname}: {local.data}")

        # -- // Get details from Google Drive - once populated will skip \\ --
        if not bypass_cloud and self.do_cloud and not self.local_only:
            if cloud_svc == "gdrive" and self.cloud is None:
                # buried import until I find out why this import takes so @#%$@#% long.  Not imported until 1st refresh is called
                with Halo(text="Loading Google Drive Library",
                          spinner="dots1"):
                    from consolepi.gdrive import GoogleDrive
                self.cloud = GoogleDrive(hostname=local.hostname)
                log.info("[MENU REFRESH] Gdrive init")

            # Pass Local Data to update_sheet method get remotes found on sheet as return
            # update sheets function updates local_cloud_file
            _msg = "[MENU REFRESH] Updating to/from {}".format(cloud_svc)
            log.info(_msg)
            if stdin.isatty():
                self.spin.start(_msg)
            # -- // SYNC DATA WITH GDRIVE \\ --
            remote_consoles = self.cloud.update_files(
                local.data)  # local data refreshed above
            if remote_consoles and "Gdrive-Error:" not in remote_consoles:
                if stdin.isatty():
                    self.spin.succeed(_msg +
                                      "\n\tFound {} Remotes via Gdrive Sync".
                                      format(len(remote_consoles)))
                    for r in remote_consoles:
                        # -- Convert Any Remotes with old API schema to new API schema --
                        if isinstance(remote_consoles[r].get("adapters", {}),
                                      list):
                            remote_consoles[r][
                                "adapters"] = self.convert_adapters(
                                    remote_consoles[r]["adapters"])
                            log.warning(
                                f"Adapter data for {r} retrieved from cloud in old API format... Converted"
                            )
            elif "Gdrive-Error:" in remote_consoles:
                if stdin.isatty():
                    self.spin.fail("{}\n\t{} {}".format(
                        _msg, self.log_sym_error, remote_consoles))
                log.show(remote_consoles
                         )  # display error returned from gdrive module
                remote_consoles = []
            else:
                if stdin.isatty():
                    self.spin.warn(_msg +
                                   "\n\tNo Remotes Found via Gdrive Sync")

            if len(remote_consoles) > 0:
                _msg = f"[MENU REFRESH] Updating Local Cache with data from {cloud_svc}"
                log.info(_msg)
                if stdin.isatty():
                    self.spin.start(_msg)
                self.update_local_cloud_file(remote_consoles)
                if stdin.isatty():
                    self.spin.succeed(_msg)  # no real error correction here
            else:
                log.warning(
                    f"[MENU REFRESH] No Remote ConsolePis found on {cloud_svc}",
                    show=True,
                )
        else:
            if self.do_cloud and not bypass_cloud:
                log.show(
                    f"Not Updating from {cloud_svc} due to connection failure\n"
                    "Close and re-launch menu if network access has been restored"
                )

        # Update Remote data with data from local_cloud cache / cloud
        self.data = self.get_remote(data=remote_consoles)

    def update_local_cloud_file(self,
                                remote_consoles=None,
                                current_remotes=None,
                                local_cloud_file=None):
        """Update local cloud cache (cloud.json).

        Verifies the newly discovered data is more current than what we already know and updates the local cloud.json file if so
        The Menu uses cloud.json to populate remote menu items

        params:
            remote_consoles: The newly discovered data (from Gdrive or mdns)
            current_remotes: The current remote data fetched from the local cloud cache (cloud.json)
                - func will retrieve this if not provided
            local_cloud_file The path to the local cloud file (global var cloud.json)

        returns:
        dict: The resulting remote console dict representing the most recent data for each remote.
        """
        local_cloud_file = (config.static.get("LOCAL_CLOUD_FILE")
                            if local_cloud_file is None else local_cloud_file)

        if len(remote_consoles) > 0:
            if current_remotes is None:
                current_remotes = self.data = config.remote_update(
                )  # grabs the remote data from local cloud cache

        # update current_remotes dict with data passed to function
        if len(remote_consoles) > 0:
            if current_remotes is not None:
                for _ in current_remotes:
                    if _ not in remote_consoles:
                        if ("fail_cnt" not in current_remotes[_]
                                or current_remotes[_]["fail_cnt"] < 2):
                            remote_consoles[_] = current_remotes[_]
                        elif (remote_consoles.get(_)
                              and "fail_cnt" not in remote_consoles[_]
                              and "fail_cnt" in current_remotes[_]):
                            remote_consoles[_]["fail_cnt"] = current_remotes[
                                _]["fail_cnt"]
                    else:

                        # -- VERBOSE DEBUG --
                        log.debugv(
                            "[CACHE UPD] \n--{}-- \n    remote upd_time: {}\n    remote rem_ip: {}\n    remote source: {}\n    cache rem upd_time: {}\n    cache rem_ip: {}\n    cache source: {}\n"
                            .format(  # NoQA
                                _,
                                time.strftime(
                                    "%a %x %I:%M:%S %p %Z",
                                    time.localtime(
                                        remote_consoles[_]["upd_time"]),
                                ) if "upd_time" in remote_consoles[_] else
                                None,  # NoQA
                                remote_consoles[_]["rem_ip"]
                                if "rem_ip" in remote_consoles[_] else None,
                                remote_consoles[_]["source"]
                                if "source" in remote_consoles[_] else None,
                                time.strftime(
                                    "%a %x %I:%M:%S %p %Z",
                                    time.localtime(
                                        current_remotes[_]["upd_time"]),
                                ) if "upd_time" in current_remotes[_] else
                                None,  # NoQA
                                current_remotes[_]["rem_ip"]
                                if "rem_ip" in current_remotes[_] else None,
                                current_remotes[_]["source"]
                                if "source" in current_remotes[_] else None,
                            ))
                        # -- END VERBOSE DEBUG --

                        # No Change Detected (data passed to function matches cache)
                        if "last_ip" in current_remotes[_]:
                            del current_remotes[_]["last_ip"]
                        if remote_consoles[_] == current_remotes[_]:
                            log.debug(
                                "[CACHE UPD] {} No Change in info detected".
                                format(_))

                        # only factor in existing data if source is not mdns
                        elif ("upd_time" in remote_consoles[_]
                              or "upd_time" in current_remotes[_]):
                            if ("upd_time" in remote_consoles[_]
                                    and "upd_time" in current_remotes[_]):
                                if (current_remotes[_]["upd_time"] >
                                        remote_consoles[_]["upd_time"]):
                                    remote_consoles[_] = current_remotes[_]
                                    log.info(
                                        f"[CACHE UPD] {_} Keeping existing data from {current_remotes[_].get('source', '')} "
                                        "based on more current update time")
                                elif (remote_consoles[_]["upd_time"] >
                                      current_remotes[_]["upd_time"]):
                                    log.info(
                                        "[CACHE UPD] {} Updating data from {} "
                                        "based on more current update time".
                                        format(_,
                                               remote_consoles[_]["source"]))
                                else:  # -- Update Times are equal --
                                    if (current_remotes[_].get("adapters") and
                                            remote_consoles[_].get("adapters")
                                            and current_remotes[_]["adapters"].
                                            keys() != remote_consoles[_]
                                        ["adapters"].keys()
                                        ) or remote_consoles[_].get(
                                            "interfaces",
                                            {}) != current_remotes[_].get(
                                                "interfaces", {}):
                                        log.warning(
                                            "[CACHE UPD] {} current cache update time and {} update time are equal"
                                            " but data appears to have changed. Updating"
                                            .format(
                                                _,
                                                remote_consoles[_]["source"]))
                            elif "upd_time" in current_remotes[_]:
                                remote_consoles[_] = current_remotes[_]
                                log.info(
                                    "[CACHE UPD] {} Keeping existing data based *existence* of update time "
                                    "which is lacking in this update from {}".
                                    format(_, remote_consoles[_]["source"]))

            for _try in range(0, 2):
                try:
                    with open(local_cloud_file, "w") as cloud_file:
                        cloud_file.write(
                            json.dumps(remote_consoles,
                                       indent=4,
                                       sort_keys=True))
                        utils.set_perm(
                            local_cloud_file
                        )  # a hack to deal with perms ~ consolepi-details del func
                        break
                except PermissionError:
                    utils.set_perm(local_cloud_file)

        else:
            log.warning(
                "[CACHE UPD] cache update called with no data passed, doing nothing"
            )

        return remote_consoles

    # Currently not Used
    def do_api_request(self, ip: str, path: str, *args, **kwargs):
        """Send RestFul GET request to Remote ConsolePi to collect data

        params:
        ip(str): ip address or FQDN of remote ConsolePi
        path(str): path beyond /api/v1.0/

        returns:
        response object
        """
        url = f"http://{ip}:5000/api/v1.0/{path}"
        log.debug(f'[do_api_request] URL: {url}')

        headers = {
            "Accept": "*/*",
            "Cache-Control": "no-cache",
            "Host": f"{ip}:5000",
            "accept-encoding": "gzip, deflate",
            "Connection": "keep-alive",
            "cache-control": "no-cache",
        }

        try:
            response = requests.request("GET",
                                        url,
                                        headers=headers,
                                        timeout=config.remote_timeout)
        except (OSError, TimeoutError):
            log.warning(
                f"[API RQST OUT] Remote ConsolePi @ {ip} TimeOut when querying via API - Unreachable."
            )
            return False

        if response.ok:
            log.info(f"[API RQST OUT] {url} Response: OK")
            log.debugv(
                f"[API RQST OUT] Response: \n{json.dumps(response.json(), indent=4, sort_keys=True)}"
            )
        else:
            log.error(f"[API RQST OUT] API Request Failed {url}")

        return response

    def get_adapters_via_api(self, ip: str, rename: bool = False):
        """Send RestFul GET request to Remote ConsolePi to collect adapter info

        params:
        ip(str): ip address or FQDN of remote ConsolePi

        returns:
        adapter dict for remote if successful
        Falsy or response status_code if an error occurred.
        """
        # log = self.config.log
        if rename:
            url = f"http://{ip}:5000/api/v1.0/adapters?refresh=true"
        else:
            url = f"http://{ip}:5000/api/v1.0/adapters"
        log.info(url)  # DEBUG

        headers = {
            "Accept": "*/*",
            "Cache-Control": "no-cache",
            "Host": f"{ip}:5000",
            "accept-encoding": "gzip, deflate",
            "Connection": "keep-alive",
            "cache-control": "no-cache",
        }

        try:
            response = requests.request("GET",
                                        url,
                                        headers=headers,
                                        timeout=config.remote_timeout)
        except (OSError, TimeoutError):
            log.warning(
                "[API RQST OUT] Remote ConsolePi @ {} TimeOut when querying via API - Unreachable."
                .format(ip))
            return False

        if response.ok:
            ret = response.json()
            ret = ret["adapters"] if ret["adapters"] else response.status_code
            _msg = "Adapters Successfully retrieved via API for Remote ConsolePi @ {}".format(
                ip)
            log.info("[API RQST OUT] {}".format(_msg))
            log.debugv("[API RQST OUT] Response: \n{}".format(
                json.dumps(ret, indent=4, sort_keys=True)))
        else:
            ret = response.status_code
            log.error(
                "[API RQST OUT] Failed to retrieve adapters via API for Remote ConsolePi @ {}\n{}:{}"
                .format(ip, ret, response.text))
        return ret

    def api_reachable(self,
                      remote_host: str,
                      cache_data: dict,
                      rename: bool = False):
        """Check Rechability & Fetch adapter data via API for remote ConsolePi

        params:
            remote_host:str, The hostname of the Remote ConsolePi
            cache_data:dict, The ConsolePi dictionary for the remote (from cache file)
            rename:bool, rename = True will do api call with refresh=True Query parameter
                which tells the api to first update connection data from ser2net as it likely
                changed as a result of remote rename operation.

        returns:
            ApiReachableResponse with attributes:
                update (bool): True if the fetched data differs from the cache
                data (dict): updated ConsolePi dictionary for the remote
                reachable (bool): True if the remote responded via the API
        """
        class ApiReachableResponse:
            def __init__(self, update, data, reachable):
                self.update = update
                self.data = data
                self.reachable = reachable

        update = False
        local = self.local

        _iface_dict = cache_data["interfaces"]
        rem_ip_list = [
            _iface_dict[_iface].get("ip") for _iface in _iface_dict
            if not _iface.startswith("_")
            and _iface_dict[_iface].get("ip") not in local.ip_list
        ]

        # if inbound data includes rem_ip make sure to try that first
        for _ip in [cache_data.get("rem_ip"), cache_data.get("last_ip")]:
            if _ip:
                if _ip not in rem_ip_list or rem_ip_list.index(_ip) != 0:
                    rem_ip_list.remove(_ip)
                    rem_ip_list.insert(0, _ip)

        rem_ip = None
        _adapters = None  # stays None when rem_ip_list is empty; checked after the loop
        for _ip in rem_ip_list:
            log.debug(f"[API_REACHABLE] verifying {remote_host}")
            _adapters = self.get_adapters_via_api(_ip, rename=rename)
            if _adapters:
                rem_ip = _ip  # Remote is reachable
                if not isinstance(
                        _adapters,
                        int):  # an int indicates an HTTP status code was returned
                    if isinstance(
                            _adapters, list
                    ):  # indicates need for conversion from old api format
                        _adapters = self.convert_adapters(_adapters)
                        if not self.old_api_log_sent:
                            log.warning(
                                f"{remote_host} provided old api schema.  Recommend Upgrading to current."
                            )
                            self.old_api_log_sent = True
                    # Only compare config dict for each adapter as udev dict will generally be different due to time_since_init
                    if not cache_data.get("adapters") or {
                            a: {
                                "config": _adapters[a].get("config", {})
                            }
                            for a in _adapters
                    } != {
                            a: {
                                "config": cache_data["adapters"][a].get(
                                    "config", {})
                            }
                            for a in cache_data["adapters"]
                    }:
                        cache_data["adapters"] = _adapters
                        update = True  # --> Update if adapter dict is different
                    else:
                        cached_udev = [
                            False for a in cache_data["adapters"]
                            if 'udev' not in cache_data["adapters"][a]
                        ]
                        if False in cached_udev:
                            cache_data["adapters"] = _adapters
                            update = True  # --> Update if udev key not in existing data (udev not sent to cloud)
                elif _adapters == 200:
                    log.show(
                        f"Remote {remote_host} is reachable via {_ip},"
                        " but has no adapters attached\nit's still available in remote shell menu"
                    )

                # remote was reachable update last_ip, even if returned bad status_code still reachable
                if not cache_data.get("last_ip", "") == _ip:
                    cache_data["last_ip"] = _ip
                    update = True  # --> Update if last_ip is different than currently reachable IP
                break

        if cache_data.get("rem_ip") != rem_ip:
            cache_data["rem_ip"] = rem_ip
            update = (
                True  # --> Update if rem_ip didn't match (was previously unreachable)
            )

        if not _adapters:
            reachable = False
            if isinstance(cache_data.get("adapters"), list):
                _adapters = cache_data.get("adapters")
                _adapters = {
                    _adapters[_adapters.index(d)]["dev"]: {
                        "config": {
                            k: _adapters[_adapters.index(d)][k]
                            for k in _adapters[_adapters.index(d)]
                        }
                    }
                    for d in _adapters
                }
                cache_data["adapters"] = _adapters
                _msg = (
                    f"{remote_host} Cached adapter data was in old format... Converted to new.\n"
                    f"\t\t{remote_host} Should be upgraded to the current version of ConsolePi."
                )
                log.warning(_msg, show=True)
                update = True  # --> Convert to new and Update if cache data was in old format
        else:
            reachable = True

        return ApiReachableResponse(update, cache_data, reachable)

    def convert_adapters(self, adapters):
        return {
            adapters[adapters.index(d)]["dev"]: {
                "config": {
                    k: adapters[adapters.index(d)][k]
                    for k in adapters[adapters.index(d)]
                }
            }
            for d in adapters
        }
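
For reference, a worked illustration of the old-to-new schema conversion convert_adapters performs (sample data is made up):

# Old API schema: a list of adapter dicts.
old = [{"dev": "/dev/ttyUSB0", "baud": 9600}]
# New API schema: keyed by device name, with the original fields nested
# under "config".
new = {"/dev/ttyUSB0": {"config": {"dev": "/dev/ttyUSB0", "baud": 9600}}}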
Example #15
File: utils.py  Project: wangonya/cchat
    authy_id = config['user']['authy_id']
    authy_token = config['user']['authy_token']
    status = authy_api.users.status(authy_id)
    if status.ok():
        spinner.succeed("authy verified")
    else:
        spinner.fail(f"authy verification failed: {status.errors()}")
        sys.exit()
    identity = config['user']['identity']
    spinner.succeed(f"logged in as {identity}")
except (KeyError, TypeError):
    try:
        # get current users to check for duplicate username
        identities = client.chat.services(chat_service_sid).users.list()
        # create new user
        spinner.warn("new user")

        email = input("enter email: ").strip()
        while not email or not re.match(r"[^@]+@[^@]+\.[^@]+", email):
            if not email:
                spinner.warn("an email is required for registration")
            else:
                spinner.warn("invalid email format")
            email = input("enter email: ").strip()

        country_code = input("enter country code (without +): ").strip()
        while not country_code:
            spinner.warn("a country code is required for registration")
            country_code = input("enter country code (without +): ").strip()
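The snippet reads credentials from a config mapping; a plausible shape, inferred purely from the keys accessed above (the values are placeholders):

config = {
    "user": {
        "authy_id": "12345678",          # Authy user id registered earlier
        "authy_token": "secret-token",   # token checked during verification
        "identity": "wangonya",          # Twilio Chat identity
    }
}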
Example #16
class ProgressIndicator:
    def __init__(self, colorize, quiet):
        self._colorize = colorize
        self._quiet = quiet
        self._current_message = ''

    def start(self, message=None):
        if self._quiet:
            return
        if message is None:
            message = ''
        if self._colorize:
            text = color(message, 'info')
            self._spinner = Halo(spinner='bouncingBall', text=text)
            self._spinner.start()
            self._current_message = message
        else:
            msg(message)

    def update(self, message=None):
        if self._quiet:
            return
        if self._colorize:
            if self._current_message:
                self._spinner.succeed(
                    color(self._current_message, 'info', self._colorize))
            self._spinner.stop()
            self.start(message)
        else:
            msg(message)

    def warn(self, message=None):
        if self._quiet:
            return
        if self._colorize:
            if self._current_message:
                self._spinner.succeed(
                    color(self._current_message, 'info', self._colorize))
            self._spinner.stop()
            self._spinner.warn(color(message, 'warn', self._colorize))
            self._current_message = ''
        else:
            msg('WARNING: ' + message)

    def stop(self, message=None):
        if self._quiet:
            return
        if self._colorize:
            if self._current_message:
                self._spinner.succeed(
                    color(self._current_message, 'info', self._colorize))
            self._spinner.stop()
            self.start(message)
            self._spinner.succeed(color(message, 'info', self._colorize))
            self._spinner.stop()
            self._current_message = ''
        else:
            if message:
                msg(message)

    def fail(self, message=None):
        if self._colorize:
            self._spinner.fail(
                color(self._current_message, 'error', self._colorize))
            self._spinner.stop()
            self.start(message)
            self._spinner.fail(color(message, 'error', self._colorize))
            self._spinner.stop()
        else:
            msg('ERROR: ' + message)
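A minimal driver for this class (a sketch; the color() and msg() helpers are assumed to exist as used above):

progress = ProgressIndicator(colorize=True, quiet=False)
progress.start('resolving dependencies')   # spins while the first step runs
progress.update('fetching archives')       # ticks off the previous step, starts the next
progress.stop('all done')                  # ticks off the current step and prints a final success line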
Example #17
File: main.py Project: githubtrip/KarmaBot
while True:
    try:
        spinner.start()
        subreddit = reddit.subreddit('random')
        domains = ['i.redd.it', 'i.imgur.com']
        limit = None
        print('Random Subreddit Is: ', subreddit)

        submissions = list(subreddit.top('all', limit=limit))
        submission = random.choice(submissions)
        if submission.domain in domains:
            im = pyimgur.Imgur(imgur_id)
            uploaded_image = im.upload_image(url=submission.url)
            with open('links.txt', "a") as f:
                f.write(uploaded_image.link + "\n")
            reddit.validate_on_submit = True
            subreddit.submit(submission.title, url=uploaded_image.link)
            spinner.succeed('success')

        elif submission.domain not in domains:
            spinner.info('domain is not in domains :(')

    except Exception as e:
        spinner.fail(text=str(e))
        time.sleep(60)

    except KeyboardInterrupt:
        spinner.warn(text='shutting down :(')
        quit()
Example #18
writer.writerow(obj1)
file.close()

spinner = Halo(text='Encrypting', spinner='dots')

### encryption

psswrd = input('Password: \n')

spinner.start()
encrypt_file(psswrd, 'diary.csv')

spinner.succeed('Encryption successful')
spinner = Halo(text='Uploading', spinner='dots')
spinner.start()

try:
    ### File upload
    ftp = connect()
    ftp.cwd('/home/Docs/backup/diary')
    with open('diary.csv.enc', 'rb') as file:
        ftp.storbinary('STOR diary.csv.enc', file)
    spinner.succeed('Upload successful')

except Exception:
    spinner.warn('Failed to upload')

os.remove('diary.csv.enc')
#### end
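The snippet assumes an encrypt_file(password, path) helper defined elsewhere; one plausible implementation (an assumption, not this project's actual code) using the cryptography package:

import base64
import hashlib
from cryptography.fernet import Fernet  # hypothetical dependency

def encrypt_file(password, path):
    # Derive a 32-byte urlsafe key from the password. Illustrative only:
    # a real implementation should use a salted KDF such as PBKDF2.
    key = base64.urlsafe_b64encode(hashlib.sha256(password.encode()).digest())
    with open(path, 'rb') as f:
        token = Fernet(key).encrypt(f.read())
    with open(path + '.enc', 'wb') as f:  # writes diary.csv.enc as used above
        f.write(token)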
Example #19
def server():
    # STEP 1: CREATE AND BIND THE SERVER
    # assign an IP address and port to the socket
    s.bind(("localhost", port_no))
    server_loader()
    # set how many queued connections the socket will accept
    s.listen(5)
    # the server is now running

    # STEP 2: CONNECT ALL THE WORKER SERVERS
    # receive connections from the worker servers
    if len(workers_adress) == 0:
        no_ws_aws = prompt(selecting_no_of_ws)
        no = int(no_ws_aws["no_of_ws"])
        for i in range(no):
            spinner1 = Halo(text='Loading', spinner='dots2', color="yellow")
            spinner1.start(
                "{0} servers left to set up (please connect the worker servers) ...".
                format(no - i))
            client, addrs = s.accept()
            workers_adress.append(addrs)
            workers_conec.append(client)
            time.sleep(1)
            spinner1.succeed('Successfully connected with {0}'.format(addrs[0]))
            spinner1.stop()

        print("---------------------------------")
        print("Connected servers")
        print("No.  IP adress   Port    Status")
        for i in range(len(workers_adress)):

            print("[{0}]  {1}   {2}   Free".format(
                (i + 1), workers_adress[i][0], workers_adress[i][1]))
        print("---------------------------------")

    # STEP 3: CONNECT TO THE CLIENT
    # accept the end user's connection request
    spinner3 = Halo(text='Loading', spinner='dots2')
    spinner3.start("Waiting for the client to connect (please connect the client) ...")
    user, addrs = s.accept()
    time.sleep(1)
    spinner3.succeed('Successfully connected with client {0}'.format(addrs[0]))
    spinner3.stop()

    # STEP 4: SERVE CLIENT REQUESTS
    log = 1
    while True:
        spinner4 = Halo(text='Loading', spinner='arrow3')
        spinner4.start("Waiting for a task from client ...")
        # receive the next task from the client
        msg = user.recv(1024).decode("utf-8")

        if "sum of" in msg:
            try:
                num = msg.split(" ")
                num = int(num[2])
                l = find(no, num)
                osum = 0
                for i in range(len(workers_conec)):
                    workers_conec[i].send(bytes(json.dumps(l[i]), "utf-8"))
                    w_sum = workers_conec[i].recv(1024).decode("utf-8")
                    osum = osum + int(w_sum)
                user.send(bytes("Sum = {0}".format(osum), "utf-8"))
                spinner4.warn(
                    "[{0}] : resquest = \'{1}\' : response = \'{2}\'".format(
                        log, msg, osum))
                spinner4.stop()
            except Exception as e:
                spinner4.fail("Fail to connect")

        elif msg == "close":
            for clt in workers_conec:
                clt.send(bytes("close", "utf-8"))
            user.send(bytes("close", "utf-8"))
            spinner4.fail(
                "[{0}] : request = '{1}' : response = '{2}'".format(
                    log, msg, "close"))
            spinner4.stop()
            s.close()
            break

        else:
            for clt in workers_conec:
                clt.send(bytes(msg, "utf-8"))
            user.send(bytes("Invalid task", "utf-8"))
            spinner4.warn(
                "[{0}] : request = '{1}' : response = '{2}'".format(
                    log, msg, "Invalid task"))
            spinner4.stop()
        log += 1
    # closing the connection
    s.close()
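The server relies on a find(no, num) helper that is not shown in the snippet; presumably it splits the range 1..num into no chunks, one per worker. A hedged sketch of what it might look like (the chunking scheme is an assumption):

def find(no, num):
    # Split 1..num into `no` contiguous chunks, one list per worker server,
    # so each chunk can be JSON-encoded and sent over the socket.
    size, rem = divmod(num, no)
    chunks, start = [], 1
    for i in range(no):
        end = start + size + (1 if i < rem else 0)
        chunks.append(list(range(start, end)))
        start = end
    return chunks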
Example #20
        
        submissions = list(subreddit.top('all', limit=limit))
        submission = random.choice(submissions)
        if submission.domain in domains:
            im = pyimgur.Imgur(imgur_id)
            uploaded_image = im.upload_image(url=submission.url)
            with open ('links.txt', "a") as f:
                f.write(uploaded_image.link + "\n")
            reddit.validate_on_submit = True
            subreddit.submit(submission.title, url=uploaded_image.link)
            spinner.succeed(f'Success, posted to reddit with id {submission.id}')
            
        elif submission.domain not in domains:
            spinner.info(f'Domain {submission.domain} is not in the text database, writing it to domains.txt')
            with open('domains.txt', "a") as domain_file:
                domain_file.write(submission.domain + "\n")
    except Exception as e:
        if verbose_error == 1:
            spinner.fail(text=str(e))
            spinner.info(f'Pausing for {sleep_timer} seconds. To modify the timer, change the sleep_timer variable in main.py.')
        else:
            spinner.fail(f'Failed to post, pausing for {sleep_timer} seconds. The timer can be modified in main.py.')
            spinner.info('To check full details, enable verbose errors.')
        time.sleep(sleep_timer)
    
    except KeyboardInterrupt:
        spinner.warn(text='Shutting down bot.')
        quit()
Example #21
    @classmethod
    def remove_with_message(cls):
        spinner = Halo(text="Removing your local data...", spinner="dots")
        if UserInfoManager.remove():
            spinner.succeed("Data successfully deleted.")
        else:
            spinner.warn("Data not found.")