Example #1
def main():
    """Main running function of entire script."""

    reddit = bot_setup()
    args = parse_args(sys.argv[1:])
    print('Gathering posts...')
    posts = get_posts(reddit, args.subreddit, args.count, args.sort, args.time)

    post_dict = mirror_posts(reddit, args.destination, posts)

    if args.comments:
        progress = Bar(
            'Mirroring comments for {} threads, this may take a while...'.
            format(len(post_dict)),
            max=len(post_dict))
        progress.start()

        for original, mirror in post_dict.items():
            original_submission = reddit.submission(original)
            mirrored_submission = reddit.submission(mirror)
            original_comments = original_submission.comments
            original_comments.replace_more()

            mirror_comments(original_comments, mirrored_submission, None)
            progress.next()
        progress.finish()

    print()
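All of these examples share the same basic lifecycle: construct a Bar with a max count, call start(), call next() once per unit of work, and call finish() when done. A minimal, self-contained sketch of that pattern (the loop body is just a sleep, purely for illustration):

import time

from progress.bar import Bar

items = range(20)  # stand-in for real work items

bar = Bar('Processing', max=len(items), suffix='%(index)d/%(max)d %(percent)d%%')
bar.start()
for _ in items:
    time.sleep(0.05)  # simulated work
    bar.next()        # advance the bar by one item
bar.finish()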
Example #2
    def draw_gazemaps(self):
        """
        Writes a reduced gazemap for every user/image pair.
        Note: gazemaps take up a lot of memory, so each one must be freed after use.
        :return: Nothing
        """
        nb = 0
        for im in self.image_list:
            nb += im.nb_of_users()
        if nb == 0:
            nb = 1
        bar = Bar('Generating Heatmaps', max=nb, stream=sys.stdout)
        bar.start()

        for image in self.image_list:
            #if image.image_id == "1217722":
            #image.generate_all_heatmaps()
            #image.save_all_heatmaps()
            #image.save_all_heatmaps_ln()
            #image.save_all_heatmaps_by_image()
            image.save_all_heatmaps_reduced(bar)
            #image.remove_all_heatmaps()
            gc.collect()

        #for user in self.user_list:
        #if user.user_id == "3425935":
        #        for im_id in user.image_data:
        #            image = user.image_data[im_id]
        #            image.generate_heatmap(user.user_id)
        #        user.save_all_heatmaps_by_user()
        #        for im_id in user.image_data:
        #            image = user.image_data[im_id]
        #            image.remove_heatmap(user.user_id)
        #        gc.collect()
        bar.finish()
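Examples #2, #9 and #10 all size the bar with a nested count (one step per user/image pair, clamped to at least 1 so max is never zero) and then hand the bar object to each image's save method, which calls next() internally. A minimal sketch of that pattern, using a hypothetical stand-in class:

from progress.bar import Bar

class FakeImage:
    """Hypothetical stand-in for the real image class."""

    def __init__(self, n_users):
        self._n_users = n_users

    def nb_of_users(self):
        return self._n_users

    def save_all_heatmaps_reduced(self, bar):
        for _ in range(self._n_users):
            bar.next()  # one step per user/image pair

image_list = [FakeImage(3), FakeImage(0), FakeImage(5)]

nb = sum(im.nb_of_users() for im in image_list) or 1  # avoid max=0
bar = Bar('Generating Heatmaps', max=nb)
bar.start()
for image in image_list:
    image.save_all_heatmaps_reduced(bar)
bar.finish()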
Example #3
    def maxInRows(self, f, N):

        x = self.x
        t = self.t
        c = self.c
        dt = self.dt
        dx = self.dx

        num1 = len(t)
        num2 = len(x)
        n = zeros((num2, num1))
        u = zeros((num2, num1))

        # Defining constant for easy interpreting
        C = c * dt / dx
        alpha = (2 * rho) / (H)
        beta = dt**2 / m
        print(
            "--------------------------------------------------------------------------------"
        )
        print('Length of M  = ', num1)
        print('Length of N  = ', num2)
        print('C = ', C)
        print('beta =', beta)
        print('alpha =', alpha)
        print('dx =', dx)
        print('dt =', dt)
        print("Time: ", Time)
        """
        widgets = ['Progress: ', Percentage(), ' ', Bar(marker='0',left='[',right=']'),
                   ' ', ETA(), ' ', FileTransferSpeed()] #see docs for other options
                   """
        pbar = Bar('Loading', fill='#', suffix='%(percent)d%%')
        pbar.start()

        # For loop for N X M matrix
        for j in range(num1 - 1):
            # Initial force, applied from staples.
            u[0, j] = A * sin(t[j] * 2 * pi * f)
            pbar.next(j + 1)
            time.sleep(0.1)
            for i in range(num2 - 1):
                k[i] = 4e9 * exp(-4 * x[i])
                # my statement consisting of n, amplitude of membrane wave
                n[i,
                  j + 1] = beta * (u[i, j] -
                                   k[i] * n[i, j]) + 2 * n[i, j] - n[i, j - 1]
                # my statement consisting of u, pressure difference
                u[i, j+1] = C**2*(u[i+1, j] - 2*u[i, j] + u[i-1, j]) + 2*u[i, j]\
                    - u[i, j-1] - alpha * (n[i, j+1] - 2*n[i, j] + n[i, j-1])
        pbar.finish()
        print(
            "--------------------------------------------------------------------------------"
        )

        maxInRows = amax(n, axis=1)
        normalized = maxInRows / maxInRows.max()

        return normalized
Example #4
    def do_daily_iteration(self):
        log("Start Daily Iteration", is_heading=True)
        self.incorrect_aspect = []
        self.correct_aspect = []
        dailyDirectory = os.path.join(dest_directory, "DailySource")
        remove_all_files(dailyDirectory)
        progressBar = None
        try:
            urls = self.get_submissions_for_subreddit("day")
            log("URLS Gathered")

            totalCount = len(urls)
            if self.args.show_progress:
                progressBar = Bar("Downloading Daily Images",
                                  max=totalCount,
                                  suffix='%(index)d / %(max)d  %(percent)d%%')
                progressBar.start()
            # get suitable image for desktop background
            while len(urls) != 0:
                # select specific url at random
                attempt, urls = get_random_url(urls)
                image_url, post_permalink = attempt
                log("Process URL:", image_url, "URLS Left:", len(urls))

                # save first attempt at okay file url
                imageObj = RedditImage(image_url, post_permalink)
                imageObj.get_image_from_url(dailyDirectory)
                if not imageObj.image_downloaded():
                    continue

                if imageObj.image_is_landscape():
                    self.correct_aspect.append(imageObj)
                else:
                    self.incorrect_aspect.append(imageObj)
                if progressBar is not None:
                    progressBar.next()
                log("URL has been processed")

            if progressBar is not None:
                progressBar.finish()
                progressBar = None

            RedditDatabase().insert_images(self.correct_aspect +
                                           self.incorrect_aspect)
            if len(self.incorrect_aspect) > 0:
                log("Start Image Combining Process")
                ci = CombineImages(self.incorrect_aspect, dest_directory)
                log("Save Resulting Image")
                pathOfResult = ci.do_combine_landscape_process()
                RedditDatabase().insert_combined(ci)
                log("Set Image as Desktop Background")
                RedditImage.set_image_to_desktop_background(pathOfResult)
            else:
                self.correct_aspect[0].set_to_desktop_background()
        finally:
            # cleanup all images that had been temporarily downloaded
            if progressBar is not None:
                progressBar.finish()
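Example #4 only creates the bar when --show-progress was given, keeps progressBar = None otherwise, guards every next() call, and finishes the bar in a finally block so the terminal is restored even if the loop raises. A minimal sketch of that optional-bar pattern, with the CLI flag replaced by a plain boolean:

import time

from progress.bar import Bar

show_progress = True    # stand-in for args.show_progress
urls = list(range(10))  # stand-in for the gathered URLs

progress_bar = None
try:
    if show_progress:
        progress_bar = Bar('Downloading', max=len(urls),
                           suffix='%(index)d / %(max)d  %(percent)d%%')
        progress_bar.start()
    for _ in urls:
        time.sleep(0.05)  # stand-in for downloading one image
        if progress_bar is not None:
            progress_bar.next()
finally:
    # leave the terminal in a clean state even on errors
    if progress_bar is not None:
        progress_bar.finish()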
Example #5
def editorial_publish(guides,
                      endpoint,
                      function_class,
                      user_agent,
                      nailgun_bin,
                      content_generator):
    """
    takes care of publishing the editorial content for the guides.
    """

    # init the nailgun thing for ed content generation.
    nailguninit(nailgun_bin,content_generator)


    searches= {}

    pbar = Bar('extracting editorial content for guides:',max=len(guides)+1)
    pbar.start()

    error = False
    for i, guide in enumerate(guides):
        jsonguide = None
        with open(guide,'r') as g:
            jsonguide = json.load(g)

        if not jsonguide:
            logging.error('could not load json from {0}'.format(guide))
            error = True
            continue
        search = cityinfo.cityinfo(jsonguide)
        uri = cityres.cityres(search,endpoint)
        if not uri:
            logging.error(
                    'no dbpedia resource was found for {0}'.format(guide))
            error = True
            continue
        urls = urlinfer.urlinferdef([unquote(uri)])
        if len(urls) < 1:
            logging.error('no wikipedia/wikivoyage urls found/inferred'\
                   ' for resource {0}'.format(uri))
            error = True
            continue
        content = editorial_content(urls,function_class,user_agent)
        if not content:
            logging.error('no editorial content could be'\
                    ' generated for {0}'.format(guide))
            error = True
            continue

        #insert the content into the guide
        jsonsert.jsonsert(content, guide)

        logging.info('editorial content for {0} successfully'\
                ' inserted.'.format(guide))
        pbar.next()

    pbar.finish()
    return error
Example #6
    def wait_with_progress_bar(
            self,
            message: str,
            timeout: Optional[int] = PROGRESS_TIMEOUT) -> None:
        progress_bar = Bar(message, max=self.futures_count, check_tty=False)
        progress_bar.start()

        for progress in self.progress(timeout=timeout):
            progress_bar.goto(progress.completed)

        progress_bar.finish()
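Example #6 drives the bar with goto(), which jumps to an absolute position instead of incrementing; that suits polling loops where some other source reports how many items have completed so far. A minimal sketch of the same idea, with the completed count simulated locally:

import time

from progress.bar import Bar

total = 50
bar = Bar('Waiting', max=total)
bar.start()

completed = 0
while completed < total:
    time.sleep(0.05)
    completed = min(total, completed + 7)  # pretend a poll reported this count
    bar.goto(completed)                    # jump straight to that position

bar.finish()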
Example #7
def package_repo():
    setup_repo()
    repo_name = repo.repo_name
    pkg_temp = 'packaged-repos'
    pkg_dir = os.path.join(SCRIPT_DIR, pkg_temp)

    web_base = os.path.dirname(os.path.abspath(repo.web_dir))
    if not os.path.exists(web_base):
        print("Parent directory of repo '{0}' not found at: {1}"
              .format(repo_name, web_base))
        return False
    os.chdir(web_base)

    print("Gathering '{0}' repo directory data".format(repo_name))
    item_count = sum(len(d) + len(f) for _, d, f in os.walk(repo_name)) + 1
    print("  {0} items to archive".format(item_count))

    if not os.path.exists(pkg_dir):
        os.mkdir(pkg_dir)

    curdatetime = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    targz = os.path.join(pkg_dir,
                         "{0}-repo_{1}.tar.gz".format(repo_name, curdatetime))
    if os.path.exists(targz):
        os.unlink(targz)

    bar = Bar('Archiving repo', fill='=', max=item_count)

    def size_progress(tarinfo):
        bar.next()
        return tarinfo

    os.chdir(web_base)  # just to make sure
    try:
        bar.start()
        with tarfile.open(targz, "w:gz") as tar:
            tar.add(repo_name, filter=size_progress)
    except KeyboardInterrupt:
        print("\nArchiving error: keyboard interrupt; archive incomplete")
        return False
    except tarfile.TarError as e:
        print("\nArchiving error: {0}".format(e))
        return False
    finally:
        bar.finish()

    print("Repo '{0}' archived: {1}".format(repo_name, targz))

    return True
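Example #7 sizes the bar by walking the directory tree first, then lets tarfile drive the progress: the filter callback passed to tar.add() is invoked once per archived member, so each call advances the bar. A minimal, self-contained sketch of that callback pattern (the files and paths here are created only for illustration):

import os
import tarfile
import tempfile

from progress.bar import Bar

source_dir = tempfile.mkdtemp()  # hypothetical directory to archive
for name in ("a.txt", "b.txt", "c.txt"):
    with open(os.path.join(source_dir, name), "w") as f:
        f.write(name)

item_count = sum(len(d) + len(f) for _, d, f in os.walk(source_dir)) + 1
bar = Bar('Archiving repo', fill='=', max=item_count)

def size_progress(tarinfo):
    bar.next()  # tarfile calls the filter once per member
    return tarinfo

bar.start()
with tarfile.open(os.path.join(tempfile.gettempdir(), "demo.tar.gz"), "w:gz") as tar:
    tar.add(source_dir, filter=size_progress)
bar.finish()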
Example #8
def banner(guides,
           endpoint,
           function_description_class,
           user_agent,
           nailgun_bin,
           description_gen):
    """
    Insert a banner picture into a guide and download it on the file system.
    """

    nailguninit(nailgun_bin, description_gen)

    error = False
    pbar = Bar('fetching the depiction banner for the guides',max=len(guides)+1)
    pbar.start()

    for i, g in enumerate(guides):
        url = depiction_url(g, user_agent, function_description_class,
                endpoint)
        if url:
            guide_folder = os.path.dirname(g)
            filename = url_filename(url)
            error = download(guide_folder, filename, url)
            error |= zip_insert(guide_folder, filename)
            error |= remove_banner(guide_folder, filename)
            if error:
                logging.error('could not download/insert/remove {0}. There '\
                        'will be no banner for {1}'.format(url, g))
                insert_error = jsonsert.imagesert(g, None, None)

            else:
                logging.info('inserting details into the guide {0}'.format(g))
                insert_error = jsonsert.imagesert(g, filename, url)

        else:
            logging.error('could not find a depiction image for {0} so there will be no banner'.format(g))
            insert_error = jsonsert.imagesert(g, None, None)
            error = True

        if insert_error:
            logging.error("problem inserting the image into {0}".format(g))
            error = True

        pbar.next()


    pbar.finish()
    return error
Example #9
    def draw_scanpaths(self):
        """
        Draws all the scanpaths for the user/image pairs
        :return: 
        """
        nb = 0
        for im in self.image_list:
            nb += im.nb_of_users()
        if nb == 0:
            nb = 1
        bar = Bar('Generating Scanpaths', max=nb, stream=sys.stdout)
        bar.start()

        for image in self.image_list:
            image.save_all_scanpath(bar)
        bar.finish()
Example #10
    def draw_raw(self):
        """
        Draws all raw point visualisations for the user/image pairs.
        :return: 
        """
        nb = 0
        for im in self.image_list:
            nb += im.nb_of_users()
        if nb == 0:
            nb = 1
        bar = Bar('Generating raw points images', max=nb, stream=sys.stdout)
        bar.start()

        for image in self.image_list:
            image.save_all_raw(bar)
        bar.finish()
Example #11
def filter_poi(guides, f):
    """
    Remove certain POIS based on a filter function. Filter function should
    return True if the poi should be REMOVED.
    """
    nbr_guides = len(guides)

    bar = Bar('filtering the guides poi with a function.',max=nbr_guides)
    Error = False

    bar.start()

    for g in guides:
        cur_content = None
        with open(g,'r') as file_guide:
            cur_content = json.load(
                    file_guide,
                    object_pairs_hook=collections.OrderedDict)

        if not cur_content:
            logging.error('could not load content for:{}. POI not filtered'.format(g))

        else:

            # get the POI.
            guide_pois = None
            try:
                guide_pois = cur_content['Cities'][0]['pois']
            except:
                logging.error('{} did not contain any POI. They will not be filtered'.format(g))
                bar.next()
                continue

            new_pois = [p for p in guide_pois if not f(p)]
            cur_content['Cities'][0]['pois'] = new_pois

            # reserialize the guide content.
            with open(g,'w') as file_guide:
                json.dump(cur_content, file_guide)

        bar.next()


    bar.finish()
    return Error
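Examples #11, #12, #14 and #19 all follow the same shape: one next() per guide file, with the JSON loaded, modified and rewritten in between, and next() also called on the error path so the bar can still reach max. A minimal sketch of that shape, with temporary files standing in for real guides and a made-up filter:

import json
import tempfile

from progress.bar import Bar

guides = []  # hypothetical guide files created just for this sketch
for i in range(3):
    tmp = tempfile.NamedTemporaryFile('w', suffix='.json', delete=False)
    json.dump({'Cities': [{'pois': [{'id': i}, {'id': i + 10}]}]}, tmp)
    tmp.close()
    guides.append(tmp.name)

def unwanted(poi):
    return poi['id'] >= 10  # stand-in filter: True means the poi is removed

bar = Bar('filtering the guides', max=len(guides))
bar.start()
for g in guides:
    with open(g) as fh:
        content = json.load(fh)
    try:
        pois = content['Cities'][0]['pois']
    except (KeyError, IndexError):
        bar.next()  # still advance so the bar can finish
        continue
    content['Cities'][0]['pois'] = [p for p in pois if not unwanted(p)]
    with open(g, 'w') as fh:
        json.dump(content, fh)
    bar.next()
bar.finish()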
Example #12
def remove_street_picture(guides):
    """
    Remove pictures from the poi when the subcategory is street.
    """

    removed_pic_name = []
    Error = False
    bar = Bar('removing street pics',max=len(guides))
    bar.start()
    for guide in guides:
        content = guide_content(guide)
        if not content:
            continue
        else:
            # get the pois
            pois = None
            try:
                pois = content['Cities'][0]['pois']
            except Exception as e:
                logging.error('guide {} did not contain pois. Street picture'\
                        ' will not be removed.'.format(guide))
                bar.next()
                continue

            for p in pois:
                sub = p.get('subcategory')
                if sub == 'street':
                    try:
                        pic = p['picture']['picture']
                        removed_pic_name.append(pic)
                        p.pop('picture')
                    except Exception as e:
                        continue

            # reserialize the content.
            with open(guide, 'w') as file_guide:
                json.dump(content,file_guide)

            #remove_from_zip(guide, removed_pic_name)

        bar.next()

    bar.finish()

    return Error
Example #13
def update_plugin():
    setup_repo()
    if args.zip_name.lower() == 'all':
        zips = [
            z for z in os.listdir(repo.upload_dir)
            if (os.path.isfile(os.path.join(repo.upload_dir, z))
                and z.lower().endswith('.zip'))
        ]
    else:
        zips = [args.zip_name]

    if not zips:
        if args.zip_name.lower() == 'all':
            print('No plugins archives found in uploads directory')
        else:
            print('No plugin archive name defined')
        return False

    repo.output = False  # nix qgis_repo output, since using progress bar
    up_bar = Bar("Updating plugins in '{0}'".format(repo.repo_name),
                 fill='=',
                 max=len(zips))
    up_bar.start()
    for i in up_bar.iter(range(0, len(zips))):
        try:
            repo.update_plugin(zips[i],
                               name_suffix=args.name_suffix,
                               auth=args.auth,
                               auth_role=args.auth_role,
                               git_hash=args.git_hash,
                               versions=args.versions,
                               keep_zip=args.keep_zip,
                               untrusted=args.untrusted,
                               invalid_fields=args.invalid_fields)
        except KeyboardInterrupt:
            return False

    if args.sort_xml:
        print("Sorting repo plugins.xml")
        post_sort = QgisPluginTree.plugins_sorted_by_name(
            repo.plugins_tree.plugins())
        repo.plugins_tree.set_plugins(post_sort)

    return True
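The plugin-repo examples use Bar.iter(), which wraps an iterable and advances the bar after each yielded item; in the progress library's implementation it also finishes the bar when the iterable is exhausted, which is why Example #13 never calls finish() explicitly. A minimal sketch, with the plugin names and the update call replaced by stand-ins:

import time

from progress.bar import Bar

zips = ['plugin_a.zip', 'plugin_b.zip', 'plugin_c.zip']  # hypothetical names

up_bar = Bar('Updating plugins', fill='=', max=len(zips))
up_bar.start()
for zip_name in up_bar.iter(zips):
    time.sleep(0.1)  # stand-in for repo.update_plugin(zip_name, ...)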
Example #14
def add_parse_address(guides):
    """
    Uses reverse geocoding to try and add a parsed version of the address.
    """
    Error = False
    bar = Bar('adding parsed address',max=len(guides))
    bar.start()

    for guide in guides:
        content = guide_content(guide)
        if not content:
            continue
        else:
            # get the pois
            pois = None
            try:
                pois = content['Cities'][0]['pois']
            except Exception as e:
                logging.error('guide {} did not contain pois. Parsed address'\
                        ' will not be added.'.format(guide))
                bar.next()
                continue

            for p in pois:
                latitude = get_in(p, "location", "latitude")
                longitude = get_in(p, "location", "longitude")

                try:
                    coords = ", ".join([str(latitude),str(longitude)])
                    parsed = reversegeo.reverse_geocode(coords)
                    p['address']['parsed'] = parsed
                except:
                    Error=True
                    logging.error('could not add parsed address to a poi ...')

            # reserialize the content.
            with open(guide, 'w') as file_guide:
                json.dump(content,file_guide)


        bar.next()

    bar.finish()
    return Error
Example #16
def remove_homepage_from_domains(guides,domains):
    """
    for all the guides, will remove the homepage of the poi that match a
    given domain.
    """
    pbar = Bar('removing bad homepages from guides',max=len(guides)+1)
    pbar.start()

    error = False
    for i,g in enumerate(guides):
        remove_error = remove_homepage_guide(g,domains)
        if remove_error:
            logging.error("could not remove the bad homepage'\
                    ' from {0}".format(g))
            error |= True

        pbar.next()

    pbar.finish()

    return error
Example #17
    def draw_all_timelines(self):
        """
        Draws all the timelines
        :return: None
        """
        self.draw_timeline(key_dates=[("24/04/2017", "White Exam"),
                                      ("12/06/2017", "Exam"),
                                      ("21/08/2017", "2nd Sess. Exam")])
        bar = Bar('Generating Timelines',
                  max=len(self.user_list),
                  stream=sys.stdout)
        bar.start()
        for u in self.user_list:
            u_id = u.user_id
            self.draw_timeline(key_dates=[("24/04/2017", "White Exam"),
                                          ("12/06/2017", "Exam"),
                                          ("21/08/2017", "2nd Sess. Exam")],
                               user_id=u_id)
            bar.next()
        bar.finish()
Example #18
def retrieve_data_from(category, year_from, year_to):
    print('\nSearching arXiv papers from %s category...' % (category))

    for year in range(year_from, year_to + 1):
        print('Retrieving papers from year: %i...' % year)

        # Build a simple search query with 1 paper request to retrieve info from the feed metadata
        query = (ARXIV_SEARCH_QUERY_URI % (year, year, category, 0, 1))

        # Perform a GET request and retrieve from the feed the total amount of data available in the year
        response = urllib.request.urlopen(query)
        feed = feedparser.parse(response)
        total_results = int(feed.feed.opensearch_totalresults)
        print("Total: %i papers." % total_results)

        # Create a graph to keep the data
        g = nx.Graph()

        start_indexes = range(0, total_results, MAX_RESULTS_PER_REQUEST)
        bar = Bar('Pulling data',
                  max=len(start_indexes),
                  suffix='%(percent)d%%')  # set up progress bar
        bar.start()
        for start_from in start_indexes:
            query = (
                ARXIV_SEARCH_QUERY_URI %
                (year, year, category, start_from, MAX_RESULTS_PER_REQUEST))
            response = urllib.request.urlopen(query)
            feed = feedparser.parse(response)
            parse_feed(feed, g)
            time.sleep(WAIT_TIME)  # Avoid flooding the arXiv server

            # Update progress
            bar.next()

        bar.finish()

        print("Saving data to file...")
        save_as_csv(g, category, year)
        print('-' * 40)
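The arXiv example sizes the bar by the number of page requests (range(0, total_results, MAX_RESULTS_PER_REQUEST)) rather than by the number of records. A minimal sketch of that sizing, with the HTTP request and feed parsing replaced by a sleep and made-up numbers:

import time

from progress.bar import Bar

total_results = 2350  # pretend value read from the feed metadata
page_size = 100       # stand-in for MAX_RESULTS_PER_REQUEST

start_indexes = range(0, total_results, page_size)
bar = Bar('Pulling data', max=len(start_indexes), suffix='%(percent)d%%')
bar.start()
for start_from in start_indexes:
    time.sleep(0.05)  # stand-in for the GET request and parse_feed()
    bar.next()
bar.finish()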
Example #19
def zipclean(path, guide_name, frequency=1):
    """
    remove zip code from US postal addresses when they are found in mtrip
    guide.
    """
    logging.info('zipcode cleaning from directory {0} started'.format(path))

    guide_filenames = list_guide(path, guide_name)

    if len(guide_filenames) == 0:
        msg = 'there was no guide with filename {0}'\
                ' found under {1} so no zipcode cleaning was'\
                ' performed'.format(guide_name, path)
        logging.warning(msg)
        die(msg)

    error = False
    pbar = Bar('cleaning zipcodes from the guides',max=len(guide_filenames))
    pbar.start()
    for i,g in enumerate(guide_filenames):
        guide_data = None
        with open(g,'r') as guide:
            guide_data = json.load(guide)

        if not guide_data:
            logging.error("could not load content from {0}".format(g))
            error = True
            continue

        clean_guide_data = clean_guide(guide_data, frequency)
        with open(g, 'w') as guide:
            json.dump(clean_guide_data, guide)

        logging.info('zipcode cleaning for {0} done'.format(g))
        pbar.next()

    pbar.finish()
    logging.info('zipcode cleaning from directory {0} finished'.format(path))
    return error
Example #20
def load_raw_data(data_dir):
    """Load all the results
    """

    data_list = sorted(glob.glob(data_dir + "*.txt"))
    x, y, u_tmp, v_tmp, mask_tmp = openpiv.tools.load_vectors(data_list[0])

    u = np.empty([len(data_list), x.shape[0], x.shape[1]])
    v = np.empty_like(u)
    mask = np.empty_like(u)
    u[0, :, :] = u_tmp
    v[0, :, :] = v_tmp
    mask[0, :, :] = mask_tmp

    # load the rest of the data
    pbar = Bar("Importing data from {}".format(data_dir), max=len(data_list))
    pbar.start()
    for i in range(1, len(data_list)):
        pbar.next()
        x, y, u[i, :, :], v[i, :, :], mask[
            i, :, :] = openpiv.tools.load_vectors(data_list[i])

    pbar.finish()

    return (x, y, u, v, mask)
Example #21
def mirror_posts(reddit, destination, posts):
    """Mirror (crosspost) posts from one subreddit to another

    Arguments:
        reddit: A `praw.Reddit` instance
        destination: A Subreddit model to post to
        posts: A `praw.models.ListingGenerator` containing the posts to
            crosspost
    """
    post_dict = {}
    posts = list(posts)

    if reddit.user.me().name in reddit.subreddit(destination).moderator():
        progress = Bar('Crossposting...', max=len(posts))
        progress.start()
        for post in posts:
            crosspost = post.crosspost(destination, send_replies=False)
            post_dict[post.id] = crosspost.id
            progress.next()
        progress.finish()
    else:
        raise NotModeratorError("You are not a moderator of this subreddit.")

    return post_dict
Example #22
def mirror_repo():
    setup_repo()
    mirror_temp = 'mirror-temp'
    mirror_dir = os.path.join(SCRIPT_DIR, mirror_temp)
    merge_xml = 'merged.xml'

    if args.only_download and args.skip_download:
        print('Both --only-download and --skip-download specified! '
              'Choose either, but not both.')
        return False

    if args.skip_download:
        tree = QgisPluginTree(os.path.join(mirror_dir, merge_xml))
    else:
        xml_url = args.plugins_xml_url
        if not xml_url or not xml_url.lower().endswith('.xml'):
            print('Missing plugins.xml or URL does not end with .xml')
            return False
        url_parts = urlparse(xml_url)
        b_name = '{0}_{1}'.format(
            url_parts.hostname.replace('.', '-'),
            os.path.splitext(os.path.basename(xml_url))[0])

        if not os.path.exists(mirror_dir):
            os.mkdir(mirror_dir)
        repo.remove_dir_contents(mirror_dir, strict=False)

        q_vers = args.qgis_versions.replace(' ', '').split(',') \
            if args.qgis_versions is not None else None
        if q_vers is None:
            urls = [xml_url]
            names = ['{0}.xml'.format(b_name)]
        else:
            urls = ['{0}?qgis={1}'.format(xml_url, v)
                    for v in q_vers]
            names = ['{0}_{1}.xml'.format(b_name, v.replace('.', '-'))
                     for v in q_vers]

        tree = QgisPluginTree()
        dl_bar = Bar('Downloading/merging xml', fill='=', max=len(urls))
        dl_bar.start()
        try:
            for i in dl_bar.iter(range(0, len(urls))):
                out_xml = os.path.join(mirror_dir, names[i])
                download(urls[i], out=out_xml, bar=None)
                tree.merge_plugins(out_xml)
        except KeyboardInterrupt:
            return False

        print("Sorting merged plugins")
        name_sort = QgisPluginTree.plugins_sorted_by_name(tree.plugins())
        tree.set_plugins(name_sort)

        xml = tree.to_xml()

        print("Writing merged plugins to '{0}/{1}'".format(mirror_temp,
                                                           merge_xml))
        with open(os.path.join(mirror_dir, merge_xml), 'w') as f:
            f.write(xml)
        if args.only_xmls:
            return True

    downloads = {}
    elements = {}
    for p in tree.plugins():
        dl_url = p.findtext("download_url")
        file_name = p.findtext("file_name")
        if all([file_name, dl_url, dl_url not in downloads]):
            downloads[file_name] = dl_url
            elements[file_name] = p
            # for testing against plugins.qgis.org
            # if len(downloads) == 10:
            #     break

    if not args.skip_download:
        repo.remove_dir_contents(repo.upload_dir)

        dl_bar = Bar('Downloading plugins', fill='=', max=len(downloads))
        dl_bar.start()
        try:
            for f_name, dl_url in dl_bar.iter(downloads.iteritems()):
                out_dl = os.path.join(repo.upload_dir, f_name)
                download(dl_url, out=out_dl, bar=None)
        except KeyboardInterrupt:
            return False

    if args.only_download:
        print("Downloads complete, exiting since --only-download specified")
        return True

    zips = [z for z in os.listdir(repo.upload_dir)
            if (os.path.isfile(os.path.join(repo.upload_dir, z))
                and z.lower().endswith('.zip'))]
    if not zips:
        print('No plugins archives found in uploads directory')
        return False

    repo.output = False  # nix qgis_repo output, since using progress bar
    up_bar = Bar("Adding plugins to '{0}'".format(repo.repo_name),
                 fill='=', max=len(downloads))
    up_bar.start()
    try:
        for zip_name in up_bar.iter(downloads.iterkeys()):
            repo.update_plugin(
                zip_name,
                name_suffix=args.name_suffix,
                auth=args.auth,
                auth_role=args.auth_role,
                # don't remove existing or just-added plugins when mirroring
                versions='none',
                untrusted=True,
                invalid_fields=(not args.validate_fields)
            )
            # plugins are 'untrusted,' until overwritten with mirrored repo data
    except KeyboardInterrupt:
        return False

    print("Sort plugins in '{0}'".format(repo.repo_name))
    # Sorting is the right thing to do here, plus...
    # Helps ensure 'startswith' finding of plugins will find earliest occurrence
    # of a partial version, e.g. plugin.1.0 is found before plugin.1.0.1
    init_sort = QgisPluginTree.plugins_sorted_by_name(
        repo.plugins_tree.plugins())
    repo.plugins_tree.set_plugins(init_sort)

    up_bar = Bar("Updating '{0}' plugins with mirrored repo data"
                 .format(repo.repo_name),
                 fill='=', max=len(elements))
    up_bar.start()
    cp_tags = ['about', 'average_vote', 'author_name', 'create_date',
               'deprecated', 'description', 'downloads', 'experimental',
               'external_dependencies', 'homepage', 'rating_votes',
               'repository', 'tags', 'tracker', 'trusted', 'update_date',
               'uploaded_by']
    maybe_missing = []
    needs_resorted = False
    try:
        for file_name, el in up_bar.iter(elements.iteritems()):
            nam, _ = os.path.splitext(file_name)
            p = repo.plugins_tree.find_plugin_by_package_name(nam,
                                                              starts_with=True)
            if not p:  # maybe the base version has been adjusted, try again
                temp_nam = re.sub(r'((\d+\.)?(\d+\.)?(\d+))', r'.\1', nam)
                p = repo.plugins_tree.find_plugin_by_package_name(
                    temp_nam, starts_with=True)
            if not p:
                maybe_missing.append(file_name)
                continue
            else:
                p = p[0]

            # print("Updating '{0}'...".format(p[0].get('name')))
            for tag in cp_tags:
                tag_el = el.find(tag)
                tag_p = p.find(tag)
                if tag_el is not None and tag_p is not None:
                    txt = tag_el.text
                    # print("  {0}: {1} <- {2}".format(tag, tag_p.text, txt))
                    if tag in QgisPlugin.metadata_types('cdata'):
                        if tag_el.text is not None:
                            txt = etree.CDATA(tag_el.text)
                    tag_p.text = txt
            # update plugin name
            ns = args.name_suffix if args.name_suffix is not None \
                else repo.plugin_name_suffix
            if el.get('name') is not None:
                el_name = u"{0}{1}".format(el.get('name'), ns)
                if p.get('name') != el_name:
                    needs_resorted = True
                    p.set('name', el_name)
    except KeyboardInterrupt:
        return False

    if needs_resorted:
        print("Re-sorting plugins in '{0}'".format(repo.repo_name))
        re_sort = QgisPluginTree.plugins_sorted_by_name(
            repo.plugins_tree.plugins())
        repo.plugins_tree.set_plugins(re_sort)

    print("Writing '{0}' {1}".format(repo.repo_name, repo.plugins_xml_name))
    repo.write_plugins_xml(repo.plugins_tree_xml())

    print('\nDone mirroring...')

    print("Plugin results:\n  attempted: {0}\n  mirrored: {1}"
          .format(len(tree.plugins()), len(repo.plugins_tree.plugins())))

    if maybe_missing:
        print('\nWARNING (version conflicts): plugins downloaded but MAY not '
              'be in XML after update:\n  {0}\n'
              .format(', '.join(maybe_missing)))

    return True
Example #23
def description_publish(guides,
                        user_agent,
                        function_class,
                        nailgun_bin,
                        description_gen):
    """
    Publish the description content for the guides.
    """
    # start the nailgun thing for usage with description_generation.
    nailguninit(nailgun_bin, description_gen)
    sources_domain = {'wikipedia','wikivoyage'}
    error = False
    for g in guides:
        jsonguide = None
        with open(g,'r') as guide:
            jsonguide = json.load(guide)

        if not jsonguide:
            logging.error('could not load json from {0}'.format(g))
            error = True
            continue

        # notice the 0 index here. This is ok because there is only one city
        # per guide. Maybe that will not be the case in the future.
        pois = jsonguide['Cities'][0]['pois']

        pbar = Bar('extracting description for the poi(s) in {0}:'.format(g),max=len(pois)+1)
        pbar.start()
        for i,p in enumerate(pois):
            desc = p['descriptions']
            for k, v in desc.items():
                try:
                    url = v['source'].get('url')
                except:
                    logging.error("source did not contain a dictionary"\
                            " for {0}".format(p['name']['name']))
                    continue
                hostname = urlparse(url).hostname
                if hostname:
                    tldn = hostname.split('.')[-2]
                else:
                    continue
                if tldn in sources_domain:
                    content_raw = description_content(
                            [url],
                            function_class,
                            user_agent)
                    c_list = json.loads(content_raw)
                    content = c_list[0] if len(c_list) > 0 else None
                    if not content:
                        poi_name = p['name']['name']
                        logging.error(
                                'failed to generate descriptive content'\
                                ' for {0} using url {1}'.format(poi_name,
                                    url))
                        error = True
                    else:
                        v['text'] = content.get('article',None)
                        v['source']['url'] = content.get('url',None)

            pbar.next()

        # redump the guide into the file
        pbar.finish()
        with open(g,'w') as guide:
            json.dump(jsonguide, guide)

    logging.info('description content successfully inserted in all guides')
    return error
Example #24
bytes1 = int(n21)
bytes2 = bytes1 - 128

#Set Difficulty / IV
n = int(sys.argv[1])
n1 = str(n)

bar = Bar('Processing', max=n)
info1 = str("Keys: "+ str(a)+ ":"  + str(b))
info1 = colored(info1, 'yellow')

Diff = colored(str(n), 'red')
info = colored(str("Hash Difficulty: "), 'white')
#print(info1)
#print(info, Diff);
bar.start()
h = int(finEnc)

while n > 2:
    c = a + b
    a = b
    b = c
    n = n - 1
    bar.next()
#Second Round Preparation

j = c
p = n *n
g07 = int(c * 2)
bar.next()
Example #25
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s", "--serial", default="/dev/ttyACM0", help="Path to USB serial device"
    )
    parser.add_argument(
        "-f", "--file", type=argparse.FileType("rb"), help="Path to .bin image file"
    )
    parser.add_argument(
        "-r",
        "--reboot",
        action="store_true",
        help="Reset device without flashing any image",
    )
    parser.add_argument(
        "-p",
        "--printsn",
        action="store_true",
        help="Print the serial number or an empty string if no device detected",
    )
    parser.add_argument(
        "-a",
        "--argument",
        type=int,
        default=-1,
        help="User argument (u32) passed to application after reset",
    )
    parser.add_argument(
        "-d", "--dummy", default=False, action="store_true", help="Dummy mode",
    )
    args = parser.parse_args()
    if args.file is not None and args.reboot:
        print("-f and -r can not both be specified at the same time")
        exit(1)

    if args.file is None and not args.reboot and not args.printsn:
        print("Either -f or -r or -p required")
        exit(1)

    if not args.dummy:
        ser = serial.Serial(args.serial)
        tty.setraw(ser.fd)
        # flush the serial port buffer, if any
        ser.timeout = 0.1
        ser.read(4096)

        ser.timeout = 2
    else:
        ser = DummySerial()

    if args.printsn:
        sn = read_serialNumber(ser)
        ret = 0
        if sn is None:
            sn = ""
            ret = 1
        print(sn)
        exit(ret)

    if not detect_dfu(ser):
        print("Device not in DFU mode")
        print(
            "Please enter DFU mode through menu CONFIG -> DFU or hold down "
            "the JOG LEFT button and power cycle the device."
        )
        exit(1)

    if args.file is not None:
        print(f"Uploading firmware file {args.file.name}")
        # set the flash address where the firmware is to be written
        ser.write(send_write_addr(0x08004000))
        size = os.stat(args.file.name).st_size
        pbar = Bar(message=os.path.basename(args.file.name), max=size)
        pbar.start()
        # write the firmware
        for data in block_reader(args.file, blocksize=1200):
            if data:
                ser.write(send_bytes(data))
                pbar.next(len(data))
                ser.read(1)  # wait for block write to complete
        pbar.finish()

    # set user argument
    if args.argument >= 0:
        print("Sending user argument")
        cmd = b"\x22\xe8" + int.to_bytes(args.argument, 4, "little")
        ser.write(cmd)

    print("Resetting")
    # reboot device
    ser.write([0x20, 0xEF, 0x5E])
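Example #25 sizes the bar in bytes and advances it with next(len(data)), i.e. by the size of each chunk rather than by one. A minimal sketch of the same pattern, reading from an in-memory buffer instead of a firmware file:

import io
import os

from progress.bar import Bar

payload = os.urandom(10_000)  # stand-in for a .bin image
stream = io.BytesIO(payload)

pbar = Bar('firmware.bin', max=len(payload))
pbar.start()
while True:
    data = stream.read(1200)  # same block size the example uses
    if not data:
        break
    pbar.next(len(data))      # advance by the number of bytes consumed
pbar.finish()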
Example #26
from progress.bar import Bar

# Secure access to domo
import secret

# General functions for RR
import rr_fun

import boto3  #AWS
import botocore

s3 = boto3.client('s3')
BUCKET_NAME = 'fabiano-crm-consys'

bar = Bar('Processing', max=3)
bar.start()


def employee(dispo):
    if not pd.isna(dispo):
        if any(x in dispo for x in [
                'apset', 'ows', 'lms', 'apconfirmed', 'gotrid', 'cbk',
                'aprschld', 'cant', 'email', 'notenough', 'nas', 'awc', 'mbf',
                'clsrnoap', 'nit', 'moveto', 'dnc', 'custnoap', 'dna',
                'custnoap_rsch', 'wrong', 'moveto', 'asm',
                'mtg not qualified (no fraud)', 'notwknumb', 'spanish',
                '30 days cbk', '60 days cbk', '90 days cbk', 'allbusy'
        ]):
            return "Fronter"
        elif any(x in dispo for x in [
                'nosalecb', 'sale', 'ccbk'
Example #27
SELECTOR = ("//*[not(contains(@style,'display:none')"
            " or contains(@class,'hidden'))]"
            "/*/a[contains(@href,'{0}')"
            " or starts-with(@href,'/')]").format(host)

SELENIUM_SELECTOR = ("//*[not(contains(@style,'display:none')"
                     " or contains(@class,'hidden'))]"
                     "/*/a[contains(@href,'{0}')]")

childs = get_page_childs(initial_url)
CACHE = []
new_pages_count = len(childs)
bar = Bar('Processing', max=len(childs))

bar.start()
recursion = 0

while new_pages_count > 0:
    queue = Queue()
    results_queue = Queue()
    new_pages_count = 0

    for page in childs:
        if page['link'] not in CACHE:
            CACHE.append(page['link'])
            queue.put(page, timeout=10)

            # workaround for queue.put:
            time.sleep(0.01)
Example #28
    def do_weekly_iteration(self):
        if RedditAPI.should_run_weekly():
            log("Start Weekly Iteration", is_heading=True)
            initialSource = os.path.join(base_directory, "PictureSource",
                                         "LockScreenSource")
            finalSource = os.path.join(base_directory, "PictureSource",
                                       "LockScreen")
            remove_all_files(initialSource)
            remove_all_files(finalSource)
            progressBar = None
            try:
                weekly_urls = self.get_submissions_for_subreddit("week")
                log("URLS Gathered")
                totalCount = len(weekly_urls)
                landscape = []
                portrait = []
                if self.args.show_progress:
                    progressBar = Bar(
                        "Downloading Weekly Images",
                        max=totalCount,
                        suffix='%(index)d / %(max)d  %(percent)d%%')
                    progressBar.start()

                with open(
                        os.path.join(base_directory, "PictureSource",
                                     "lockScreenStat.txt"), "w") as f:
                    while len(weekly_urls) > 0:
                        # select specific url at random
                        attempt, weekly_urls = get_random_url(weekly_urls)
                        image_url, post_permalink = attempt

                        log("Process URL:", image_url, "URLS Left:",
                            len(weekly_urls))

                        # get image and save if able
                        imageObj = RedditImage(image_url, post_permalink)
                        imageObj.get_image_from_url(initialSource)
                        if imageObj.image_is_landscape():
                            landscape.append(imageObj)
                        else:
                            portrait.append(imageObj)
                        if imageObj.image_downloaded():
                            f.write(str(imageObj) + "\n")

                        if progressBar is not None:
                            progressBar.next()

                        log("URL has been processed")

                    if progressBar is not None:
                        progressBar.finish()

                    for imageObj in landscape:
                        imageObj.move_to_folder(finalSource)
                        print("did the copy")

                    RedditDatabase().insert_images(landscape + portrait)

                    # iterate through creating landscape photos
                    resulting = CombineImages.iterate_combine_landscape(
                        portrait, finalSource)
                    RedditDatabase().insert_all_combined(resulting)
                self.set_weekly_run_file()
            finally:
                if progressBar is not None:
                    progressBar.finish()