def get_problems(self, urls):
    """Overridden. """
    problems = []
    for url in urls:
        problem = Problem()
        problem.url = url
        path = urlparse(url).path
        assert path
        match = SiteSpoj.pattern_contest.search(path)
        problem.id = match.group('PROBLEM')
        assert problem.id
        # Fetch the problem page; skip problems Spoj does not know about.
        response = SiteSpoj._proxy.get(url)
        if response.status_code != 200:
            warn('Problem "' + problem.id + '" does not exist on Spoj!')
            continue
        tree = html.fromstring(response.text)
        # Problem name.
        nodes = tree.xpath(SiteSpoj.xpath_problem_name)
        problem.name = str(nodes[0]) if nodes else None
        # Time limit: drop surrounding whitespace and trailing 's'; s -> ms.
        nodes = tree.xpath(SiteSpoj.xpath_problem_time)
        raw = nodes[0].strip()[:-1] if nodes else None
        problem.time_limit_ms = float(raw) * 1000 if raw else None
        # Source limit: drop surrounding whitespace and trailing 'B'; B -> kB.
        nodes = tree.xpath(SiteSpoj.xpath_problem_source)
        raw = nodes[0].strip()[:-1] if nodes else None
        problem.source_limit_kbyte = float(raw) / 1000 if raw else None
        # Memory limit: drop surrounding whitespace and trailing 'MB'; MB -> kB.
        nodes = tree.xpath(SiteSpoj.xpath_problem_memory)
        raw = nodes[0].strip()[:-2] if nodes else None
        problem.memory_limit_kbyte = float(raw) * 2**10 if raw else None
        # Sample inputs and outputs alternate in the fetched node list.
        samples = tree.xpath(SiteSpoj.xpath_problem_ins_outs)
        problem.inputs = [sample.strip() for sample in samples[::2]]
        problem.outputs = [sample.strip() for sample in samples[1::2]]
        # Keep only problems for which every field was fetched.
        complete = (problem.name and problem.time_limit_ms
                    and problem.source_limit_kbyte
                    and problem.memory_limit_kbyte
                    and problem.inputs and problem.outputs)
        if complete:
            problems.append(problem)
        else:
            warn('Problem "' + problem.id + '" not fetched successfully!')
    return problems
def match_problems(self, conf):
    """Overridden.

    Resolves the problem URLs selected by *conf* against the problems
    actually listed on the Codeforces contest page. Returns a sorted
    list of problem URLs (empty when the contest page can't be fetched).
    """
    url_contest = self.match_contest(conf)
    url_template_problem = url_contest + SiteCodeforces.url_template_suffix_problem
    page = SiteCodeforces._proxy.get(url_contest)
    # Data from web:
    # - available problem ids.
    if page.status_code == 200:
        t = html.fromstring(page.text)
        elems = t.xpath(SiteCodeforces.xpath_problem_ids)
        ids_available = [str(elem.strip()) for elem in elems]
    else:
        # FIX: bail out early — without the contest page 'ids_available'
        # was never assigned and the code below raised NameError. This
        # mirrors the SiteCodeChef.match_problems error handling.
        warn('Unable to fetch: ' + url_contest)
        return []
    ids = []
    # Match single problem from 'location'.
    location = urlparse(conf['location']).path or '/'
    tokens = SiteCodeforces.pattern_contest.search(location)
    if tokens is not None:
        id_raw = tokens.group('PROBLEM')
        id_problem = SiteCodeforces.resolve_problem_id(id_raw)
        if id_problem:
            ids.append(id_problem)
    # Match potentially multiple problems from 'problems'.
    for problem in conf['problems']:
        tokens = SiteCodeforces.pattern_problem.findall(problem)
        id_raw = tokens and tokens[-1]
        id_problem = SiteCodeforces.resolve_problem_id(id_raw)
        if id_problem:
            ids.append(id_problem)
    # If no problems are successfully manually selected, select them all.
    if not ids:
        ids = ids_available
    # Notify about selected but non-available problems.
    urls = []
    for id_problem in ids:
        if id_problem in ids_available:
            urls.append(url_template_problem.format(id_problem))
        else:
            warn('Problem "' + id_problem + '" does not exist in ' + url_contest)
    return sorted(urls)
def match_problems(self, conf):
    """Overridden. """
    url_contest = self.match_contest(conf)
    url_template_problem = url_contest + SiteCodeChef.url_template_suffix_problem
    # Fetch the contest page to learn which problems actually exist.
    page = SiteCodeChef._proxy.get(url_contest)
    if page.status_code != 200:
        warn('Unable to fetch: ' + url_contest)
        return []
    tree = html.fromstring(page.text)
    nodes = tree.xpath(SiteCodeChef.xpath_problem_ids)
    ids_available = [str(node.strip()) for node in nodes]
    selected = []
    # A single problem may be encoded in the 'location' URL path.
    path = urlparse(conf["location"]).path or "/"
    match = SiteCodeChef.pattern_contest.search(path)
    if match is not None and match.group("PROBLEM") is not None:
        selected.append(match.group("PROBLEM"))
    # Any number of problems may come from the 'problems' list.
    for spec in conf["problems"]:
        selected.extend(SiteCodeChef.pattern_problem.findall(spec))
    # No explicit selection means "take every available problem".
    if selected:
        ids = SiteCodeChef.get_problem_ids(selected, ids_available)
    else:
        ids = ids_available
    return [url_template_problem.format(problem_id) for problem_id in ids]
def get_problems(self, urls):
    """Overridden. """
    problems = []
    for url in urls:
        problem = Problem()
        problem.url = url
        path = urlparse(url).path
        assert path
        match = SiteRosalind.pattern_contest.search(path)
        problem.id = match.group('PROBLEM')
        assert problem.id
        # Fetch the problem page; skip problems Rosalind does not know about.
        response = SiteRosalind._proxy.get(url)
        if response.status_code != 200:
            warn('Problem "' + problem.id + '" does not exist on Rosalind!')
            continue
        tree = html.fromstring(response.text)
        # Problem name.
        nodes = tree.xpath(SiteRosalind.xpath_problem_name)
        problem.name = str(nodes[0]).strip() if nodes else None
        # A single sample input is fetched per problem.
        nodes = tree.xpath(SiteRosalind.xpath_problem_ins)
        problem.inputs = [str(nodes[0]).strip()] if nodes else None
        # A single sample output is fetched per problem.
        nodes = tree.xpath(SiteRosalind.xpath_problem_outs)
        problem.outputs = [str(nodes[0]).strip()] if nodes else None
        # Keep only problems for which every field was fetched.
        if problem.name and problem.inputs and problem.outputs:
            problems.append(problem)
    return problems
def _command_prep(**args):
    """Prepares the environment for selected problems.

    Creates the contest/problem directory hierarchy (depth governed by
    the 'subdir_depth' setting), writes language and runner template
    files and dumps the fetched test inputs/outputs for every problem.

    This command is idempotent irrespective of the "--force" switch.

    Keys read from *args*: 'conf_all' (merged configuration dict),
    'contest_obj', 'problems_objs', 'plugin_langs', 'plugin_runners'.

    Returns ExitStatus.ERROR when the working directory is missing,
    ExitStatus.OK otherwise.
    """
    conf_all = args['conf_all']
    dir_working = expanduser(conf_all['workdir'])
    # 1) Working directory has to exist.
    if not isdir(dir_working):
        error('Directory "' + dir_working + '" does not exist!')
        return ExitStatus.ERROR
    # 2) Establish contest directory.
    contest_obj = args['contest_obj']
    if conf_all['subdir_depth'] == 2:
        # Deepest hierarchy: a dedicated sub-directory per contest.
        dir_contest = join(dir_working, contest_obj.id)
        safe_mkdir(dir_contest, force=conf_all['force'])
    else:
        dir_contest = dir_working
    # Proceed if there exists directory hierarchy until this point
    # (safe_mkdir above may have refused to create it without --force).
    if (isdir(dir_contest)):
        # 3) Establish problems directories.
        problems_objs = args['problems_objs']
        if conf_all['subdir_depth'] >= 1:
            # One sub-directory per problem.
            problems_dirs = {}
            for prob in problems_objs:
                problems_dirs[prob] = join(dir_contest, prob.id)
                safe_mkdir(problems_dirs[prob], force=conf_all['force'])
        else:
            # Flat layout: every problem shares the contest directory.
            problems_dirs = {prob: dir_contest for prob in problems_objs}
        # Template plugins and the separators used to split
        # "extension<sep>variant" plugin identifiers.
        plugin_langs = args['plugin_langs']
        selected_langs = conf_all['lang']
        sep_langs = hac.SETTINGS_CONST['plugin_temp_sep'][DataType.LANG]
        plugin_runners = args['plugin_runners']
        selected_runners = conf_all['runner']
        sep_runners = hac.SETTINGS_CONST['plugin_temp_sep'][DataType.RUNNER]
        # 4) Create language and runner templates.
        # For each problem ...
        for prob in problems_objs:
            if isdir(problems_dirs[prob]):
                # Base path "<problem-dir>/<problem-id>"; extensions appended below.
                problem_path = join(problems_dirs[prob], prob.id)
                # ... create language for all selected languages.
                for lang in selected_langs:
                    assert sep_langs in lang
                    assert lang in plugin_langs
                    # File extension is the part before the separator.
                    lang_ext = lang.split(sep_langs)[0]
                    lang_file = problem_path + os.extsep + lang_ext
                    safe_fwrite(lang_file, plugin_langs[lang],
                                force=conf_all['force'])
                # ... create runner for every combination of runner/language.
                for runn in selected_runners:
                    for lang in selected_langs:
                        assert sep_runners in runn
                        assert runn in plugin_runners
                        runn_ext = runn.split(sep_runners)[0]
                        lang_ext = lang.split(sep_langs)[0]
                        if lang_ext in plugin_runners[runn]:
                            # Runner file: "<id>.<lang-ext>.<runner-ext>", executable.
                            runner_file = problem_path + os.extsep + \
                                          lang_ext + os.extsep + runn_ext
                            safe_fwrite(runner_file,
                                        plugin_runners[runn][lang_ext],
                                        force=conf_all['force'],
                                        executable=True)
                        else:
                            # Runner plugin has no template for this language.
                            warn("Runner for [{0}/{1}] combo can't be created!"
                                 .format(runn, lang_ext))
                # 5) Dump inputs and outputs.
                if conf_all['tests'] >= 1:
                    # Test files are 1-indexed: "<id>.1.in", "<id>.1.out", ...
                    for i, inp in enumerate(prob.inputs):
                        in_file = join(problems_dirs[prob],
                                       prob.id + os.extsep + str(i+1)
                                       + os.extsep + 'in')
                        safe_fwrite(in_file, inp, force=conf_all['force'])
                    for i, out in enumerate(prob.outputs):
                        out_file = join(problems_dirs[prob],
                                        prob.id + os.extsep + str(i+1)
                                        + os.extsep + 'out')
                        safe_fwrite(out_file, out, force=conf_all['force'])
    return ExitStatus.OK