def execute(self):
    """Apply the release changes and offer them as a commit.

    When the VCS has the gitflow extension, a gitflow release is
    started first; the header, version and history updates follow,
    ending with the diff-and-commit prompt.
    """
    # Start a gitflow release branch only when the extension exists.
    if utils.has_extension(self.vcs, 'gitflow'):
        self._gitflow_release_start()
    # Run each change step in order, then offer the commit.
    for step in (self._change_header,
                 self._write_version,
                 self._write_history,
                 self._diff_and_commit):
        step()
Example #2
0
 def execute(self):
     """Make the release edits and present them for commit.

     Starts a gitflow release first when the VCS carries the gitflow
     extension, then updates header, version and history files before
     prompting for a commit of the resulting diff.
     """
     gitflow_available = utils.has_extension(self.vcs, 'gitflow')
     if gitflow_available:
         self._gitflow_release_start()
     # Ordered pipeline of edit steps followed by the commit offer.
     steps = [self._change_header, self._write_version,
              self._write_history, self._diff_and_commit]
     for action in steps:
         action()
Example #3
0
 def execute(self):
     """Write the version bump, record history, then commit and push.

     With the gitflow extension present, the develop branch is checked
     out first (gitflow development happens there); the version,
     header (in add mode) and history updates follow, ending with a
     commit offer and a push.
     """
     if utils.has_extension(self.vcs, 'gitflow'):
         self.vcs.gitflow_check_branch("develop", switch=True)
     # Each step runs in sequence; the header change needs add mode.
     for step in (self._write_version,
                  lambda: self._change_header(add=True),
                  self._write_history,
                  self._diff_and_commit,
                  self._push):
         step()
    def _find_files_rec(self, result, subpath, extension, ignore_dirs):
        """Recursively collect files under *subpath* matching *extension*.

        Matching item sub-paths are appended to *result* in place.
        Directories whose sub-path matches one of *ignore_dirs* are not
        descended into.
        """
        base = self.to_full_path(subpath)

        for name, abs_path in listdir_full(base):
            child = join_ipath(subpath, name)

            if os.path.isdir(abs_path):
                # Descend unless this directory is on the ignore list.
                if self._is_path_one_of_those(child, ignore_dirs):
                    continue
                self._find_files_rec(result, child, extension, ignore_dirs)
            elif os.path.isfile(abs_path) and has_extension(name, extension):
                result.append(child)
Example #5
0
    def _find_files_rec(self, result, subpath, extension, ignore_dirs):
        """Walk *subpath* depth-first, appending matching file sub-paths.

        Files are kept in *result* when their name carries *extension*;
        directories listed in *ignore_dirs* are skipped entirely.
        """
        directory = self.to_full_path(subpath)

        for entry, entry_path in listdir_full(directory):
            entry_subpath = join_ipath(subpath, entry)

            if os.path.isdir(entry_path):
                skip = self._is_path_one_of_those(entry_subpath, ignore_dirs)
                if not skip:
                    self._find_files_rec(result, entry_subpath,
                                         extension, ignore_dirs)
            elif os.path.isfile(entry_path):
                # Plain file: keep it only when the extension matches.
                if has_extension(entry, extension):
                    result.append(entry_subpath)
def run():
  """The main entry point: parameterise the SBML model files given
     on the command line.

     Arguments carrying an SBML extension are treated as model files;
     all remaining arguments are parsed as parameter files into one
     shared dictionary, which is then applied to every model file.
  """
  description = "Parameterise an SBML model based on a given param file"
  parser = argparse.ArgumentParser(description=description)
  # Might want to make the type of this 'FileType('r')'
  parser.add_argument('filenames', metavar='F', nargs='+',
                      help="input files: parameters and sbml model files")

  arguments = parser.parse_args()

  # Partition the arguments in a single pass on their extension.
  sbml_suffixes = [".xml", ".sbml"]
  model_files = []
  param_files = []
  for filename in arguments.filenames:
    if utils.has_extension(filename, sbml_suffixes):
      model_files.append(filename)
    else:
      param_files.append(filename)

  # Fold every parameter file into one dictionary (mutated in place).
  dictionary = dict()
  for param_file in param_files:
    parameters.parse_param_file(param_file, dictionary=dictionary)
  for model_file in model_files:
    parameterise_model_file(model_file, dictionary)
def run():
  """Perform the banalities of command-line argument processing and
     then get under way in parameterising the model.

     Arguments with the Bio-PEPA extension are the model files; the
     remaining arguments are parameter files whose contents are folded
     into a single dictionary and applied to each model, with output
     going to stdout.
  """
  description = "Parameterise an SBML model based on a given param file"
  parser = argparse.ArgumentParser(description=description)
  # Might want to make the type of this 'FileType('r')'
  parser.add_argument('filenames', metavar='F', nargs='+',
                      help="Bio-PEPA and parameter files")

  arguments = parser.parse_args()

  # Split the arguments by extension: model files versus param files.
  biopepa_suffixes = [".biopepa"]
  model_files = [name for name in arguments.filenames
                 if utils.has_extension(name, biopepa_suffixes)]
  param_files = [name for name in arguments.filenames
                 if not utils.has_extension(name, biopepa_suffixes)]

  # Accumulate all parameters; parse_param_file returns the dictionary.
  dictionary = dict()
  for param_file in param_files:
    dictionary = parameters.parse_param_file(param_file, dictionary)

  for model_file in model_files:
    parameterise_model_file(dictionary, model_file, "stdout")
Example #8
0
 def execute(self):
     """Carry out the actual release.

     Under gitflow: finish the release branch, build the egg from the
     master branch, then restore the branch we started on.  Without
     gitflow: make the tag and release from it.  Aborts when gitflow
     is present but we are not on a release branch.
     """
     logger.info('Location: ' + utils.execute_command('pwd'))
     if not utils.has_extension(self.vcs, 'gitflow'):
         # Plain (non-gitflow) repository: tag, then release.
         self._make_tag()
         self._release()
         return
     if not self.vcs.gitflow_check_prefix("release"):
         # Refuse to release from anything but a release branch.
         logger.critical(
             "You are not on a release branch, first run a prerelease "
             "or gitflow release.")
         sys.exit(1)
     self._gitflow_release_finish()
     original_branch = self.vcs.current_branch()
     logger.info(('Switching from ' + original_branch +
                  ' to master branch for egg generation.'))
     self.vcs.gitflow_check_branch("master", switch=True)
     self._release()
     logger.info('Switching to back to ' + original_branch + ' branch.')
     self.vcs.gitflow_switch_to_branch(original_branch)
Example #9
0
 def execute(self):
     """Perform the release itself.

     With gitflow the release branch is finished, the egg is built
     from master, and the starting branch is restored afterwards.
     Without gitflow a tag is created and released directly.
     """
     logger.info('Location: ' + utils.execute_command('pwd'))
     if utils.has_extension(self.vcs, 'gitflow'):
         if not self.vcs.gitflow_check_prefix("release"):
             # Not on a release branch: nothing sensible to finish.
             logger.critical(
                 "You are not on a release branch, first run a prerelease "
                 "or gitflow release.")
             sys.exit(1)
         self._gitflow_release_finish()
         start_branch = self.vcs.current_branch()
         logger.info(
             ('Switching from ' + start_branch +
              ' to master branch for egg generation.'))
         self.vcs.gitflow_check_branch("master", switch=True)
         self._release()
         logger.info('Switching to back to ' + start_branch + ' branch.')
         self.vcs.gitflow_switch_to_branch(start_branch)
     else:
         self._make_tag()
         self._release()
def get_images(args=None):
    """Download media for every subreddit in the module-level list.

    Fetches each subreddit's JSON listing from reddit, resolves
    directly-hosted images plus imgur/gfycat links, and saves them into
    a per-subreddit target directory.  Failures accumulate in the
    module-level ``failed_subreddits`` / ``failed_downloads`` /
    ``missing_pictures`` lists and are printed at the end.

    NOTE(review): *args* is normalised to a dict but never read again
    below — presumably reserved for future options.  Relies on many
    module-level names (``subreddits``, ``count``, ``target_dir``,
    ``options``, ``sort``, ``new_only``, ``viewed_posts`` …) — confirm
    they are all defined before calling.
    """
    if not args:
        args = {}
    for subreddit in subreddits:
        # rank numbers posts, starting from the configured count offset.
        rank = count
        print "-" * 5

        subreddit_target_dir = target_dir

        # if config is a tuple, then the target_dir is specified for this subreddit
        has_dir_config = False
        if hasattr(subreddit, '__iter__'):
            has_dir_config = True
            if make_sub_dirs:
                subreddit, subreddit_target_dir = subreddit
            else:
                subreddit, _ = subreddit
        if make_extra_sub_dirs and (not has_dir_config or (has_dir_config and target_dir_specified)):
            subreddit_target_dir = os.path.join(subreddit_target_dir, subreddit)
        make_dirs(subreddit_target_dir)

        # Build the listing URL, resuming "after" the starting post.
        target_url_template = "http://www.reddit.com/r/{}{}.json{}"
        starting_post = get_count_updated_request(count, target_url_template, subreddit, options)
        subreddit_options = options.copy()
        subreddit_options.update(after=starting_post)
        options_string = options_string_template.format(**subreddit_options)
        target_url = target_url_template.format(subreddit, sort, options_string)
        print u"{} -> {}".format(target_url, subreddit_target_dir)

        reddit_request = make_reddit_request(target_url)
        try:
            reddit_request.raise_for_status()
        except requests.exceptions.HTTPError as e:
            # Bad subreddit name or reddit hiccup: record and move on.
            print e
            print "Request to subreddit failed! May not be a valid subreddit."
            failed_subreddits.append("{}, at url {}".format(subreddit, target_url))
            continue

        # .json may be a dict attribute or a method — handle both
        # (differs between requests library versions).
        if isinstance(reddit_request.json, dict):
            json_data = reddit_request.json
        else:
            json_data = reddit_request.json()
        for post in json_data['data']['children']:
            url = post['data']['url']
            rank += 1

            if new_only and url in viewed_posts:
                print "Image already viewed. See {}".format("viewed_posts.txt")
                continue
            # Remember this URL so future runs can skip it.
            if url not in viewed_posts:
                with open(viewed_posts_path, "a") as f:
                    f.write(url + "\n")
            title = post['data']['title']
            file_title = get_scrubbed_file_title(title, use_rank, rank=rank)

            # URL without a media extension: try resolving via the host.
            if not utils.has_extension(url):
                if "imgur" in url:
                    # check if album - if so, move on immediately
                    if 'a' in url.rsplit('/'):
                        urls = get_image_urls_from_imgur_album(url)
                        get_images_from_urls(urls, file_title, subreddit_target_dir, new_only)
                        continue
                    # NOTE(review): urls is never used on this path — the
                    # download below still uses the page url; looks like a
                    # bug (urls[0] was probably intended).  Confirm.
                    urls = [get_single_image_url_from_imgur(url)]
                elif "gfycat" in url:
                    urls = get_videos_from_gfycat(url)
                    get_images_from_urls(urls, file_title, subreddit_target_dir, new_only)
                    continue
                else:
                    print u"\"{}\" at {} is not a directly-hosted image or is not a single image on imgur.".format(title, url)
                    continue

            if not url:
                continue
            file_path = get_target_file_path(url, file_title, subreddit_target_dir, new_only=new_only)
            if file_path:
                save_image_from_url(url, file_path)

    # Summaries of everything that went wrong during the run.
    if failed_downloads:
        print "\nThe following downloads failed: ------"
        for download_url in failed_downloads:
            print download_url

    if missing_pictures:
        print "\nThe following downloads are missing pictures: ------"
        for download_url in missing_pictures:
            print download_url

    if failed_subreddits:
        print "\nThe following subreddits failed: ------"
        for subreddit in failed_subreddits:
            print subreddit
Example #11
0
 def get_posts(post_dir):
     """Yield a work tuple for every markdown post in *post_dir*.

     Each tuple is (post path, page layout, post layout, tmp dir,
     out dir), pairing the post file with the surrounding layout and
     directory context.
     """
     for entry in os.listdir(post_dir):
         entry_path = os.path.join(post_dir, entry)
         # Skip anything that is not a regular markdown file.
         if not os.path.isfile(entry_path):
             continue
         if not has_extension(entry, 'md'):
             continue
         yield entry_path, page_layout, post_layout, tmp_dir, out_dir
Example #12
0
    if '-h' in args or len(args) != expected:
        print(
            f'Usage: {__file__} <html_dir> <layout_dir> <post_dir> <tmp_dir> <out_dir>'
        )
        print(f'\t-h : Display this help information')

    html_dir, layout_dir, post_dir, tmp_dir, out_dir = (*args[1:expected], )

    page_layout = ABS(layout_dir, '_Layout.html')
    post_layout = ABS(layout_dir, '_PostLayout.html')
    post_index_layout = ABS(layout_dir, '_PostIndexLayout.html')

    ## template all html pages
    for page in os.listdir(html_dir):
        page_fpath = os.path.join(html_dir, page)
        if os.path.isfile(page_fpath) and has_extension(page, 'html'):
            ext_len = len(
                '.html')  ## for stripping the file extension from the title
            normalised_title = f'{page[0].upper()}{page[1:-ext_len]}'
            template(page_layout, {'TITLE': normalised_title},
                     ABS(html_dir, page), ABS(out_dir, page))

    with Pool(WORKERS) as pool:

        def get_posts(post_dir):
            """Yield (post path, page layout, post layout, tmp dir,
            out dir) for each markdown file directly in *post_dir*."""
            for post in os.listdir(post_dir):
                post_fpath = os.path.join(post_dir, post)
                # Only regular files with a .md extension count as posts.
                if os.path.isfile(post_fpath) and has_extension(post, 'md'):
                    yield post_fpath, page_layout, post_layout, tmp_dir, out_dir

        ## find all markdown posts and convert them to html