def observe(sentry_dsn):
    '''
    Decorator adding twisted raven client and blocking client if reactor
    is not yet running.

    :param str sentry_dsn: URL of sentry API
    '''
    # Blocking client used to report exceptions raised by wrapped calls.
    raven_client = raven.base.Client(sentry_dsn)
    # Add twisted logObserver with twisted raven client, when available.
    observer = get_observer(sentry_dsn)
    if observer:
        log.addObserver(observer)

    def decorator(function):
        @wraps(function)
        def wrapper(*args, **kwargs):
            '''
            Calls original function, catches any exception, sends it to
            sentry and re-raises it again.
            '''
            try:
                return function(*args, **kwargs)
            except:  # noqa: E722 -- deliberately report *every* exception
                raven_client.captureException(sys.exc_info())
                raise  # re-raise caught exception
        return wrapper
    return decorator
def test_get_raw_xml(self, secrets):
    """The report endpoint should return XML whose root tag is 'report'."""
    agent = ReportAgent(secrets['API_Username'], secrets['API_User_Pass'],
                        secrets['reportURL'], secrets['Report_Key'])
    raw = agent.get_raw_xml()
    assert raw.tag == 'report'
def _read_target(self, output: bytes) -> None:  # should return Target?
    """Parse the layer/target section out of raw SRIM output bytes.

    :param output: complete raw SRIM output
    :raises NotImplementedError: layer parsing itself is unfinished
    :raises SRIMOutputParseError: when no target section is found
    """
    match_target = re.search(br'(?<=====\r\n)Layer\s+\d+\s+:.*?(?=====)',
                             output, re.DOTALL)
    if match_target:
        layer_regex = (
            r'Layer\s+(?P<i>\d+)\s+:\s+(.+)\r\n'
            r'Layer Width\s+=\s+({0})\s+A\s+;\r\n'
            r'\s+Layer #\s+(?P=i)- Density = ({0}) atoms/cm3 = ({0}) g/cm3\r\n'
            r'((?:\s+Layer #\s+(?P=i)-\s+{1}\s+=\s+{0}\s+Atomic Percent = {0}\s+Mass Percent\r\n)+)'
        ).format(double_regex, symbol_regex)
        layers = re.findall(layer_regex.encode('utf-8'), match_target.group(0))
        if layers:
            element_regex = (
                r'\s+Layer #\s+(\d+)-\s+({1})\s+=\s+({0})\s+Atomic Percent = ({0})\s+Mass Percent\r\n'
            ).format(double_regex, symbol_regex)
            element_regex_bytes = element_regex.encode()
            # We know that elements will match for every captured layer.
            layers_elements = [re.findall(element_regex_bytes, layer[5])
                               for layer in layers]
            # Parsing of the captured layers is not implemented yet.
            raise NotImplementedError()
    raise SRIMOutputParseError("unable to extract total target from file")
def _format_oneof(data, schema, replace_values=None):
    """Format *data* against the sub-schemas of a ``oneOf`` schema.

    Returns the formatted value of the first matching sub-schema; raises
    ValueError when none match and warns when more than one matches.
    """
    parameters = ""
    matched = 0
    for sub_schema in schema["oneOf"]:
        try:
            if sub_schema.get("nullable") and data is None:
                # only one schema can be nullable
                formatted = "nil"
            else:
                sub_schema["nullable"] = False
                formatted = format_data_with_schema(
                    data,
                    sub_schema,
                    replace_values=replace_values,
                )
            if matched == 0:
                # NOTE we do not support mixed schemas with oneOf
                parameters = formatted
            matched += 1
        except (KeyError, ValueError, TypeError):
            pass

    if matched == 0:
        raise ValueError(f"[{matched}] {data} is not valid for schema")
    elif matched > 1:
        warnings.warn(f"[{matched}] {data} is not valid for schema")

    return parameters
def _get_phrase_list_from_words(self, word_list):
    """Method to create contender phrases from the list of words that form
    a sentence by dropping stopwords and punctuations and grouping the left
    words into phrases. Only phrases in the given length range (both limits
    inclusive) would be considered to build co-occurrence matrix.

    Ex: Sentence: Red apples, are good in flavour.
    List of words: ['red', 'apples', ",", 'are', 'good', 'in', 'flavour']
    List after dropping punctuations and stopwords:
    ['red', 'apples', *, *, good, *, 'flavour']
    List of phrases: [('red', 'apples'), ('good',), ('flavour',)]

    List of phrases with a correct length:
    For the range [1, 2]: [('red', 'apples'), ('good',), ('flavour',)]
    For the range [1, 1]: [('good',), ('flavour',)]
    For the range [2, 2]: [('red', 'apples')]

    :param word_list: List of words which form a sentence when joined in
                      the same order.
    :return: List of contender phrases honouring the length limits.
    """
    # Consecutive keepable words form one phrase; ignored tokens break them.
    groups = groupby(word_list, lambda x: x not in self.to_ignore)
    phrases = [tuple(group[1]) for group in groups if group[0]]
    return list(
        filter(lambda x: self.min_length <= len(x) <= self.max_length,
               phrases))
def main(): fc = SampleTest(3, 10) print dir(pytest) pytest.set_trace() ## DEBUG ## print [fc.calc() for i in dir(pytest)]
def may_debug(force=False):
    """Commit/sync the working tree, reload, then maybe enter a debugger.

    A debugger is entered when *force* is true or the module-level DEBUG
    flag is set: pytest's debugger under pytest, ipdb when INTERACTIVE,
    plain pdb otherwise.

    :param bool force: always drop into the debugger, ignoring DEBUG
    """
    subprocess.run(
        'cd /kaggle/working || cd kaggle_runner; '
        'git commit -asm "Good game"; git pull; git submodule update --init --recursive',
        shell=True)
    reload_me()

    # The force and DEBUG branches were duplicated verbatim; share one path.
    if not (force or DEBUG):
        return
    if "pytest" in sys.modules:
        import pytest
        pytest.set_trace()
    elif INTERACTIVE:
        import ipdb
        ipdb.set_trace()
    else:
        import pdb
        pdb.set_trace()
def setup_access(client_id, client_secret): monzo = MonzoClient(client_id, client_secret, 'http://example.com/login/') auth_url = monzo.get_authorization_code() # pdb is needed to allow for the raw_input to stop the test. # Just type 'c' to continue on the breakpoint import pdb pdb.set_trace() print '\nA browser will now open, please login and copy the URL once authenticated' # NOQA if sys.platform == 'darwin': subprocess.Popen(['open', auth_url]) else: webbrowser.open_new_tab(auth_url) url = raw_input( "\nPlease enter the url from the 'Log in to Monzo' button in the authentication email: " ) # NOQA url = urlparse(url) import pytest pytest.set_trace() query = parse_qs(url.query) code = query['code'][0] token_info = monzo.get_access_token(code) base_dir = os.path.dirname(__file__) token_file = '{0}/token_info.json'.format(base_dir) with open(token_file, 'w') as fp: json.dump(token_info, fp) print 'Token info exported to: {0}'.format(token_file) return token_info
def handle_user_login_form():
    """Handle a login form POST and return a plain-text session response."""
    try:
        dict_form = request.form
        conf = {
            'username': dict_form['username'],
            'password': dict_form['password'],
            'jsonPrettyPrint':
                dict_form.get('jsonPrettyPrint', False) == 'on'
        }
        s = verify_login(conf)
        debug = ''
        if s.has_key('error'):
            if (s['error'] == "Login Error"):
                resp = make_response(
                    debug + 'session_id={0}'.format(s['error']), httplib.OK)
        else:
            resp = make_response(
                debug + 'session_id={0}'.format(s['session_id']), httplib.OK)
        resp.headers['Content-type'] = 'text/plain'
        resp.headers['Access-Control-Allow-Origin'] = "*"
        return resp
    except KeyError as ex:
        return MissingParameterHTTPError(exception=ex)
def load(cls, dirname=None, locales=None, domain=DEFAULT_DOMAIN):
    """Load translations from the given directory.

    :param dirname: the directory containing the ``MO`` files
    :param locales: the list of locales in order of preference (items in
                    this list can be either `Locale` objects or locale
                    strings)
    :param domain: the message domain
    :return: the loaded catalog, or a ``NullTranslations`` instance if no
             matching translations were found
    :rtype: `Translations`
    """
    if locales is not None:
        if not isinstance(locales, (list, tuple)):
            locales = [locales]
        locales = [str(l) for l in locales]
    if not domain:
        domain = cls.DEFAULT_DOMAIN
    filename = gettext.find(domain, dirname, locales)
    if not filename:
        return gettext.NullTranslations()
    with open(filename, 'rb') as fp:
        return cls(fileobj=fp, domain=domain)
def test_upload(selenium):
    """Walk through the upload page and feed it the UPLOAD env-var file."""
    login(selenium)
    assert_body_text(selenium, "Upload", "Logout")
    selenium.find_element_by_link_text("Upload").click()
    assert_body_text(
        selenium,
        "Data Set Import",
        "Tabular Metadata",
        "ISA-Tab Metadata",
        "PROVIDE METADATA FILE",
        "Download an example",
        "Choose delimiter",
        "Select file",
    )
    path = os.environ["UPLOAD"]
    # TODO: File uploads did work in the old UI, but no longer.
    # Can we trigger the event Angular is looking for?
    selenium.find_element_by_name("tabular_file").send_keys(path)
    # selenium.execute_script('$("[name=tabular_file]").change()')
    # assert_body_text(selenium, 'PREVIEW (5 ROWS)')
    # expected_title = re.sub(r'\..*$', '', re.sub(r'^.*/', '', path))
    # title_el = selenium.find_element_by_name('title')
    # assert title_el.get_attribute('value') == expected_title
def _read_target(self, output):
    """Parse the layer/target section out of raw SRIM output bytes.

    :raises NotImplementedError: layer parsing itself is unfinished
    :raises SRIMOutputParseError: when no target section is found
    """
    # Raw byte-strings avoid invalid-escape warnings; the regex engine
    # still interprets \r\n as CRLF, so matching is unchanged.
    match_target = re.search(br"(?<=====\r\n)Layer\s+\d+\s+:.*?(?=====)",
                             output, re.DOTALL)
    if match_target:
        layer_regex = (
            r"Layer\s+(?P<i>\d+)\s+:\s+(.+)\r\n"
            r"Layer Width\s+=\s+({0})\s+A\s+;\r\n"
            r"\s+Layer #\s+(?P=i)- Density = ({0}) atoms/cm3 = ({0}) g/cm3\r\n"
            r"((?:\s+Layer #\s+(?P=i)-\s+{1}\s+=\s+{0}\s+Atomic Percent = {0}\s+Mass Percent\r\n)+)"
        ).format(double_regex, symbol_regex)
        layers = re.findall(layer_regex.encode("utf-8"), match_target.group(0))
        if layers:
            element_regex = (
                r"\s+Layer #\s+(\d+)-\s+({1})\s+=\s+({0})\s+Atomic Percent = ({0})\s+Mass Percent\r\n"
            ).format(double_regex, symbol_regex)
            element_regex = element_regex.encode()
            # We know that elements will match for every captured layer.
            layers_elements = [re.findall(element_regex, layer[5])
                               for layer in layers]
            # Fixed: was misspelled 'NotImpementedError', which raised a
            # NameError instead of the intended exception.
            raise NotImplementedError()
    raise SRIMOutputParseError("unable to extract total target from file")
def parse_values(values, outfile): """ Given a file handle and the raw values from a MySQL INSERT statement, write the equivalent CSV to the file """ latest_row = [] import pytest pytest.set_trace() reader = csv.reader([values], delimiter=',', doublequote=False, escapechar='\\', quotechar="'", strict=True) writer = csv.writer(outfile, quoting=csv.QUOTE_MINIMAL) for reader_row in reader: for column in reader_row: # If our current string is empty... if len(column) == 0 or column == 'NULL': latest_row.append(chr(0)) continue # If our string starts with an open paren if column[0] == "(": # Assume that this column does not begin # a new row. new_row = False # If we've been filling out a row if len(latest_row) > 0: # Check if the previous entry ended in # a close paren. If so, the row we've # been filling out has been COMPLETED # as: # 1) the previous entry ended in a ) # 2) the current entry starts with a ( if latest_row[-1][-1] == ")": # Remove the close paren. latest_row[-1] = latest_row[-1][:-1] new_row = True # If we've found a new row, write it out # and begin our new one if new_row: writer.writerow(latest_row) latest_row = [] # If we're beginning a new row, eliminate the # opening parentheses. if len(latest_row) == 0: column = column[1:] # Add our column to the row we're working on. latest_row.append(column) # At the end of an INSERT statement, we'll # have the semicolon. # Make sure to remove the semicolon and # the close paren. if latest_row[-1][-2:] == ");": latest_row[-1] = latest_row[-1][:-2] writer.writerow(latest_row)
def parse_values(values, outfile): """ Given a file handle and the raw values from a MySQL INSERT statement, write the equivalent CSV to the file """ latest_row = [] import pytest pytest.set_trace() reader = csv.reader([values], delimiter=',', doublequote=False, escapechar='\\', quotechar="'", strict=True ) writer = csv.writer(outfile, quoting=csv.QUOTE_MINIMAL) for reader_row in reader: for column in reader_row: # If our current string is empty... if len(column) == 0 or column == 'NULL': latest_row.append(chr(0)) continue # If our string starts with an open paren if column[0] == "(": # Assume that this column does not begin # a new row. new_row = False # If we've been filling out a row if len(latest_row) > 0: # Check if the previous entry ended in # a close paren. If so, the row we've # been filling out has been COMPLETED # as: # 1) the previous entry ended in a ) # 2) the current entry starts with a ( if latest_row[-1][-1] == ")": # Remove the close paren. latest_row[-1] = latest_row[-1][:-1] new_row = True # If we've found a new row, write it out # and begin our new one if new_row: writer.writerow(latest_row) latest_row = [] # If we're beginning a new row, eliminate the # opening parentheses. if len(latest_row) == 0: column = column[1:] # Add our column to the row we're working on. latest_row.append(column) # At the end of an INSERT statement, we'll # have the semicolon. # Make sure to remove the semicolon and # the close paren. if latest_row[-1][-2:] == ");": latest_row[-1] = latest_row[-1][:-2] writer.writerow(latest_row)
def test_people_list(self):
    """Current congress listing should be a dict with person objects."""
    ppl = Legislators()
    resp = ppl.current_congress()
    assert type(resp) is dict
    pks = [x['person']['id'] for x in resp['objects']]
    resp = ppl.committees(pks)
def equal(arr):
    """Build dp where dp[i][j] = arr[i] + increments[j].

    Relies on the module-level ``increments`` sequence.
    """
    # todo handle case where all el in arr are the same
    dp = [[el + inc for inc in increments] for el in arr]
    return dp
def test_hello_world():
    """`hello world` should greet and exit cleanly."""
    runner = CliRunner()
    result = runner.invoke(hello, ["world"], catch_exceptions=False)
    assert result.exit_code == 0
    assert result.output == "Hello, world!\n"
def _breakpoint():
    """Drop into whichever interactive debugger suits the current context:
    pytest's debugger when running under pytest, otherwise the custom pdb."""
    running_under_pytest = "PYTEST_CURRENT_TEST" in os.environ
    if not running_under_pytest:
        _custom_pdb.set_trace()
        return
    import pytest
    pytest.set_trace(skip=_skip)
def test_remove_bin_with_ttl_too_large(self, put_data):
    """remove_bin with a ttl beyond 2**64 should raise ClientError."""
    key = ('test', 'demo', 1)
    record = {"Name": "Herry", 'age': 60}
    put_data(self.as_connection, key, record)
    meta = {'gen': 2, 'ttl': 2 ** 65}
    with pytest.raises(e.ClientError) as typeError:
        self.as_connection.remove_bin(key, ["age"], meta=meta)
def runtest(self):
    """Replay every recorded (method, args, kwargs) call in order,
    honouring the global mode flags for skipping and step-debugging."""
    for call, positional, keyword in self.run:
        if mode.skip:
            pytest.skip('skip mode')
        if mode.debug:
            pytest.set_trace()  # noqa
            # Consume one debug "credit", never going below zero.
            mode.debug = mode.debug - 1 if mode.debug >= 1 else 0
        call(*positional, **keyword)
def main(self): """the main function creates the MySQL statement in accordance with the user's input and using convert(), and os.path.getsize(). """ data, fields = self.convert() str_fields = ','.join(fields) file_size = os.path.getsize(self.file_path) values = [] r = 0 import pytest pytest.set_trace() for record in data: value = '' for column_no in xrange(0, len(fields)): if column_no == 0: value = '"' + record[fields[column_no]] else: value += '", "' + record[fields[column_no]] value += '"' if file_size <= 1000000: import pytest pytest.set_trace() # value = eval(value) value = ast.literal_eval(value) values.append(value) else: query = """INSERT INTO %s (%s) VALUES """ % (self.new_table, str_fields) statement = query + "(" + value + ")" # statement = query + str(value) execute_sql(statement) r += 1 if file_size <= 1000000: query = "INSERT INTO " + self.new_table + "(" + str_fields + ") VALUES (%s" for i in xrange(0, len(fields)-1): query += ", %s" query += ")" query = str(query) # # import pytest # pytest.set_trace() affected = executemany_sql(query, values) print self.new_table,affected, "row affected.(file size<1MB)" else: print self.new_table,r, "row affected.(file size>1MB)"
def yahoo_stock_info(self, ticker):
    """Scrape the Yahoo Finance quote page for *ticker* (work in progress)."""
    # NOTE(review): the URL hard-codes AAPL and only appends *ticker* as a
    # query parameter -- likely should be '.../quote/' + ticker. Confirm.
    r = requests.get('http://finance.yahoo.com/quote/AAPL?p=' + ticker)
    soup = BeautifulSoup(r.text)
    tables = soup.find_all('table')[1:]  # drop the first useless table
    # Alternative JSON endpoint:
    # curl 'https://query2.finance.yahoo.com/v10/finance/quoteSummary/AAPL?modules=summaryProfile%2CfinancialData%2CrecommendationTrend%2CupgradeDowngradeHistory%2Cearnings%2CdefaultKeyStatistics%2CcalendarEvents' | python -m json.tool > tmp2.txt
    print(soup.prettify())
def get_lane_fit(self, ysize):
    """Evaluate the fitted second-order lane polynomial over 0..ysize-1.

    :param ysize: number of vertical pixels to evaluate
    :return: tuple (line_fit, ploty) of x positions and y coordinates
    """
    # The old try/except dropped into a debugger and then hit a NameError
    # on the return; let any failure from pixels_fit() propagate instead.
    ploty = np.linspace(0, ysize - 1, ysize)
    # Original called pixels_fit() once per coefficient; hoisted to one
    # call (assumes pixels_fit() is side-effect free -- confirm).
    coeffs = self.pixels_fit()
    line_fit = coeffs[0] * ploty ** 2 + coeffs[1] * ploty + coeffs[2]
    return line_fit, ploty
def main(self): """the main function creates the MySQL statement in accordance with the user's input and using convert(), and os.path.getsize(). """ data, fields = self.convert() str_fields = ','.join(fields) file_size = os.path.getsize(self.file_path) values = [] r = 0 import pytest pytest.set_trace() for record in data: value = '' for column_no in xrange(0, len(fields)): if column_no == 0: value = '"' + record[fields[column_no]] else: value += '", "' + record[fields[column_no]] value += '"' if file_size <= 1000000: import pytest pytest.set_trace() # value = eval(value) value = ast.literal_eval(value) values.append(value) else: query = """INSERT INTO %s (%s) VALUES """ % (self.new_table, str_fields) statement = query + "(" + value + ")" # statement = query + str(value) execute_sql(statement) r += 1 if file_size <= 1000000: query = "INSERT INTO " + self.new_table + "(" + str_fields + ") VALUES (%s" for i in xrange(0, len(fields) - 1): query += ", %s" query += ")" query = str(query) # # import pytest # pytest.set_trace() affected = executemany_sql(query, values) print self.new_table, affected, "row affected.(file size<1MB)" else: print self.new_table, r, "row affected.(file size>1MB)"
def test_increment_past_max_size(self):
    """Incrementing past the signed 64-bit maximum should overflow upward."""
    key = ('test', 'demo', 'overflow')
    record = {'age': 2**63 - 1}
    self.as_connection.put(key, record)
    self.as_connection.increment(key, 'age', 2)
    _, _, data = self.as_connection.get(key)
    self.as_connection.remove(key)
    assert data['age'] > 2**63 - 1
def test_increment_beyond_min_size(self):
    """Decrementing past the signed 64-bit minimum should overflow downward."""
    key = ('test', 'demo', 'overflow')
    record = {'age': -2**63}
    self.as_connection.put(key, record)
    self.as_connection.increment(key, 'age', -2)
    _, _, data = self.as_connection.get(key)
    self.as_connection.remove(key)
    assert data['age'] < -2**63
def before_tag(context, tag):
    """Behave hook: activate the fixture a scenario is tagged with."""
    if tag.startswith('fixture'):
        # NOTE(review): one value is a 1-tuple, the other a bare callable --
        # confirm use_fixture_by_tag accepts both forms.
        fixtures = {
            'fixture.setup_testing_db_and_add_default_menu':
                (setup_testing_db_and_add_default_menu, ),
            'fixture.add_submenu': add_submenu,
        }
        return use_fixture_by_tag(tag, context, fixtures)
def get_frequency(demo_file_path):
    """Print a cumulative frequency table of the 40 most common words
    in the given text file.

    :param demo_file_path: path to a plain-text file
    """
    # 'with' guarantees the handle is closed (it previously leaked).
    with open(demo_file_path, 'r') as f:
        content_lst = f.readlines()
    content_str = " ".join(content_lst)
    words_lst = content_str.split()
    fdist = FreqDist(words_lst)
    print(fdist.tabulate(40, cumulative=True))
def test_that_analysis_404s_are_handled(
        selenium, total_analyses=TOTAL_ANALYSES):
    """Test use case where Analysis objects are deleted (for example by an
    admin, or a user inbetween multiple windows) while a user is about to
    delete said object(s) themselves

    User should receive a "Not Found" message"""
    login(selenium)
    # Create sample Data
    make_analyses_with_single_dataset(total_analyses, user)
    selenium.implicitly_wait(3)
    selenium.refresh()
    selenium.implicitly_wait(3)
    assert_text_within_id(
        selenium, "total-datasets", "{} data sets".format(1))
    assert_text_within_id(
        selenium, "total-analyses", "{} analyses".format(total_analyses)
    )
    # Simulate scenario where objects have been deleted on the backend
    Analysis.objects.all().delete()
    selenium.find_elements_by_class_name('analysis-delete')[0].click()
    selenium.implicitly_wait(3)
    wait_until_id_clickable(selenium, 'analysis-delete-button', 5).click()
    selenium.implicitly_wait(3)
    assert_text_within_id(selenium, "deletion-message-text", "not found.")
    wait_until_id_clickable(
        selenium, 'analysis-delete-close-button', 5).click()
    selenium.implicitly_wait(5)
    # Ensure that ui displays proper info after a refresh
    assert_text_within_id(
        selenium, "total-analyses", "{} analysis".format(0))
    assert_text_within_id(
        selenium, "total-datasets", "{} data sets".format(1))
def test_get_observations_with_notes_multidimension():
    """A CZ observation should carry the estimation note on all three axes."""
    filters = [
        ('indicator-group', 'ecommerce'),
        ('indicator', 'i_bgoodo'),
        ('breakdown-group', 'total'),
        ('breakdown', 'IND_TOTAL',),
        ('unit-measure', 'pc_ind'),
        ('time-period', '2006'),
    ]
    cube = create_cube()
    points = list(cube.get_data_xyz('ref-area', filters, [], [], []))
    note = 'Estimation - based on results of 2007 HH survey'
    assert filter(
        lambda item: item['ref-area']['notation'] == 'CZ',
        points)[0]['note'] == {'x': note, 'y': note, 'z': note}
def merge(self, datum: typing.Dict[str, str], as_defaults: bool = False) -> PWN:
    """Append entries from *datum*; with ``as_defaults=True`` only names
    not already present are supposed to be added.

    :param datum: name -> value mapping to merge in
    :param as_defaults: when True, existing names should be kept as-is
    :return: self, for chaining
    """
    if not as_defaults:
        for name, value in datum.items():
            self.append(name, value)
    else:
        for name, value in datum.items():
            # BUG(review): this tests membership in *datum* itself, which
            # is always True, so every entry is skipped ("this isn't
            # working") -- it almost certainly should test against the
            # names already stored on self. Fix once self's container API
            # is confirmed.
            if name in datum.keys():
                logger.debug(f'skipping {name}')
            else:
                self.append(name, value)
    return self
def main():
    """
    Parse arguments and start the program
    """
    # Iterate over all lines in all files
    # listed in sys.argv[1:]
    # or stdin if no args given.
    try:
        for line in fileinput.input():
            # Look for an INSERT statement and parse it.
            if is_insert(line):
                values = get_values(line)
                if values_sanity_check(values):
                    parse_values(values, sys.stdout)
    except KeyboardInterrupt:
        sys.exit(0)
def sync_with_remote(cls):
    """Sync Spotify top-artist rankings into the local DB for every
    time range, deleting rows the API no longer returns."""
    # SECURITY(review): hard-coded bearer token committed to source --
    # short-lived and leaks credentials; load from config/env instead.
    token = 'BQBg51i1JjKSZ5IPRG1c6nPM7VoIVqf-sbG_vR4go0GznWChYDqSmAKw1cjZiLoXuQLiERvzyVbhPNfYYTxw0vgk3rQ6bSQ0kwBXHxi9hl_Aov3EbGpwqA3Iu6u7BKqFWIdYqPAD18gOz_Qzlw_LZZaG0d-c'
    headers = {'Authorization': f'''Bearer {token}'''}
    session = cls.session()
    sql_records = cls.session().query(cls.DATA_MODEL).all()
    for time_range in ['short_term', 'medium_term', 'long_term']:
        query = {'time_range': time_range, 'limit': 50}
        # query API for all artists in time range
        resp = requests.get(f'{cls.RESOURCE_URL}?{urlencode(query)}',
                            headers=headers)
        for rank, obj in enumerate(resp.json()['items']):
            sql_record = next(
                (sql_record for sql_record in sql_records
                 if obj['id'] == sql_record.id
                 and time_range == sql_record.time_range),
                None)
            # if artist is already in DB. Will be replaced later
            if sql_record:
                sql_records.remove(sql_record)
                new_record = sql_record
            # otherwise create new sql record based on api response object
            else:
                new_record = cls.DATA_MODEL()
            for col in cls.DATA_MODEL.__table__.columns.keys():
                if col in obj:
                    setattr(new_record, col, obj[col])
            new_record.time_range = time_range
            new_record.rank = rank
            if new_record != sql_record:
                session.add(new_record)
    # Anything left in sql_records was not returned by the API: delete it.
    for sql_record in sql_records:
        session.delete(sql_record)
    try:
        session.commit()
    except InvalidRequestError as e:
        logging.error(e)
def levenshtein(s1, s2):
    """Return the Levenshtein (edit) distance between two sequences.

    Uses the classic two-row dynamic-programming formulation, O(len(s1)
    * len(s2)) time and O(len(s2)) space.
    """
    if len(s1) < len(s2):
        return levenshtein(s2, s1)
    # len(s1) >= len(s2)
    if len(s2) == 0:
        return len(s1)
    previous_row = list(range(len(s2) + 1))
    for i, c1 in enumerate(s1):
        current_row = [i + 1]
        for j, c2 in enumerate(s2):
            # j+1 instead of j since previous_row and current_row are one
            # character longer than s2
            insertions = previous_row[j + 1] + 1
            deletions = current_row[j] + 1
            substitutions = previous_row[j] + (c1 != c2)
            current_row.append(min(insertions, deletions, substitutions))
        previous_row = current_row
    return previous_row[-1]
def test_dataset_deletion(selenium, total_datasets=TOTAL_DATASETS):
    """Delete some datasets and make sure the ui updates properly"""
    login(selenium)
    # Create sample Data
    make_datasets(total_datasets, user)
    selenium.implicitly_wait(3)
    selenium.refresh()
    selenium.implicitly_wait(3)
    assert_text_within_id(
        selenium, "total-datasets", "{} data sets".format(total_datasets)
    )
    # Delete individual Datasets until there are none left
    while total_datasets:
        selenium.find_elements_by_class_name('dataset-delete')[0].click()
        selenium.implicitly_wait(3)
        wait_until_id_clickable(selenium, 'dataset-delete-button', 5).click()
        total_datasets -= 1
        wait_until_id_clickable(
            selenium, 'dataset-delete-close-button', 5).click()
        selenium.implicitly_wait(3)
        assert_text_within_id(
            selenium, "total-datasets", "{} data sets".format(total_datasets)
        )
    selenium.implicitly_wait(3)
    assert_text_within_id(
        selenium, "total-datasets", "{} data sets".format(total_datasets))
def test_cascading_deletion_of_analyses(selenium,
                                        total_analyses=TOTAL_ANALYSES):
    """Delete a Dataset and make sure its Analyses are removed from the
    UI as well"""
    login(selenium)
    # Create sample Data
    make_analyses_with_single_dataset(total_analyses, user)
    selenium.implicitly_wait(3)
    selenium.refresh()
    selenium.implicitly_wait(3)
    assert_text_within_id(
        selenium, "total-datasets", "{} data sets".format(1))
    assert_text_within_id(
        selenium, "total-analyses", "{} analyses".format(total_analyses)
    )
    selenium.find_elements_by_class_name('dataset-delete')[0].click()
    selenium.implicitly_wait(3)
    wait_until_id_clickable(selenium, 'dataset-delete-button', 5).click()
    wait_until_id_clickable(
        selenium, 'dataset-delete-close-button', 5).click()
    selenium.implicitly_wait(5)
    assert_text_within_id(
        selenium, "total-analyses", "{} analysis".format(0))
    assert_text_within_id(
        selenium, "total-datasets", "{} data sets".format(0))
def move_directory(enode, files=(), cert_dir=None, shell=None):
    """Ensure *cert_dir* exists on the node and move *files* into it.

    :param enode: node callable used to execute shell commands
    :param files: iterable of file paths to move (was a mutable default)
    :param cert_dir: target directory, defaults to /etc/ssl/certs/
    :param shell: shell to run commands in
    """
    # Verify directory is not empty, if it is set /etc/ssl/certs/ as directory
    if cert_dir is None:
        cert_dir = '/etc/ssl/certs/'
    # check if directory exists
    cmd_ls = 'ls ' + cert_dir
    file_exists = enode(cmd_ls, shell)
    if 'No such file or directory' in str(file_exists):
        # creates the directory
        cmd_mkdir = 'mkdir ' + cert_dir
        result_mkdir = enode(cmd_mkdir, shell=shell)
        # NOTE(review): '' in <str> is always True, so these asserts never
        # fire -- they should probably check for an *empty* result instead.
        assert '' in result_mkdir, 'unable to create directory ' + cert_dir
    for file in files:
        cmd_mv = 'mv ' + file + ' ' + cert_dir
        result_mv = enode(cmd_mv, shell)
        assert '' in result_mv, 'unable to move the file ' + file \
            + ' to ' + cert_dir
def pytest_runtest_setup(item):
    # Per-test setup hook (Python 2): honour incremental xfail chaining,
    # optionally pause on failure, and record the test-case name globally
    # for logging by other hooks.
    global pause
    global fh, tc_outfile, stb_ip, tc_log_path, tcName, cpe
    if "incremental" in item.keywords:
        previousfailed = getattr(item.parent, "_previousfailed", None)
        if previousfailed is not None:
            pytest.xfail("previous test failed (%s)" % previousfailed.name)
            # NOTE(review): pytest.xfail() raises immediately, so this
            # pause-on-fail block looks unreachable here -- confirm the
            # intended placement (probably before the xfail call).
            if config.getoption('--pause_on_fail'):
                cprint("Test failed . Entering debugging mode", 'red')
                pytest.set_trace()
    # Build "<module>.<class>.<test>"; classless tests have no item.cls.
    try:
        classname = "%s.%s" % (item.module.__name__, item.cls.__name__)
    except:
        classname = "%s" % (item.module.__name__)
    tcName = "%s.%s" % (classname, item.name)
    print
    cprint("\n--TC_START %s" % tcName, 'blue')
def test_clean_decimal_data(self):
    """clean_decimal_data should turn Decimal download counts into ints."""
    file_content = read_json_file(ROOT_DIR + "secrets.json")
    connection = pymysql.connect(host='mariaDB', user='******',
                                 password='******', charset='utf8mb4',
                                 cursorclass=pymysql.cursors.DictCursor,
                                 port=3306)
    this_day = datetime.datetime.now()
    year, month, day = get_date(this_day)
    sql = sql_query_top_downloads_on_odesi_for_last_month(year, month, qty=20)
    data = execute_this_query(connection, sql)
    assert isinstance(data[0]['Download Count'], Decimal)
    data = clean_decimal_data(data)
    assert isinstance(data[0]['Download Count'], int)
def post(self):
    """Import an uploaded package file (max 30MB) into the database.

    :raises MissingParameterHTTPError: oversized upload or no 'file' part
    :raises ARMHTTPError: database/model-layer failures
    :raises CairisHTTPError: any other unexpected failure
    """
    session_id = get_session_id(session, request)
    content_length = request.content_length
    max_length = 30 * 1024 * 1024
    if content_length > max_length:
        raise MissingParameterHTTPError(
            exception=RuntimeError('File exceeded maximum size (30MB)'))
    try:
        package = request.files['file']
    except LookupError:
        raise MissingParameterHTTPError(param_names=['file'])
    except Exception as ex:
        raise CairisHTTPError(status_code=CONFLICT, message=str(ex),
                              status='Unknown error')
    try:
        dao = ImportDAO(session_id)
        dao.package_import(package.stream.read())
        dao.close()
    except DatabaseProxyException as ex:
        raise ARMHTTPError(ex)
    except ARMException as ex:
        raise ARMHTTPError(ex)
    except Exception as ex:
        raise CairisHTTPError(status_code=500, message=str(ex),
                              status='Unknown error')
    resp_dict = {'message': 'Package successfully imported'}
    resp = make_response(json_serialize(resp_dict, session_id=session_id), OK)
    # NOTE(review): 'contenttype' is not a Flask Response attribute --
    # probably should be resp.headers['Content-Type']. Confirm.
    resp.contenttype = 'application/json'
    return resp
def fit_lstm(train, batch_size, nb_epoch, neurons):
    """Fit a stateful LSTM on supervised rows shaped [X..., y].

    :param train: 2-D array, last column is the target
    :param batch_size: fixed batch size (required for stateful LSTM)
    :param nb_epoch: number of manual epochs (state reset after each)
    :param neurons: LSTM layer width
    :return: the trained Keras model
    """
    X, y = train[:, 0:-1], train[:, -1]
    X = X.reshape(X.shape[0], 1, X.shape[1])
    model = Sequential()
    model.add(
        LSTM(neurons,
             batch_input_shape=(batch_size, X.shape[1], X.shape[2]),
             stateful=True))
    model.add(Dense(1))
    model.compile(loss='mean_squared_error', optimizer='adam')
    for i in range(nb_epoch):
        if (i % 100) == 0:
            print("Epochs is at %d" % i)
        model.fit(X, y, epochs=1, batch_size=batch_size, verbose=0,
                  shuffle=False)
        # Reset state between epochs so sequences start fresh each pass.
        model.reset_states()
    return model
def test_eazy(host_obj):
    """End-to-end EAZY run: generate inputs, run, read back photo-z."""
    if os.path.isdir(data_path('eazy')):
        shutil.rmtree(data_path('eazy'))
    os.mkdir(data_path('eazy'))
    # Generate
    frbeazy.eazy_input_files(host_obj.photom, data_path('eazy/input'),
                             host_obj.name, data_path('eazy/output'),
                             templates='br07_default',
                             prior_filter='GMOS_S_r')
    # Test
    assert os.path.isfile(data_path('eazy/input/G_TEST.cat'))
    assert os.path.isfile(data_path('eazy/input/zphot.param.G_TEST'))
    assert os.path.isfile(data_path('eazy/input/zphot.translate.G_TEST'))
    # Run
    frbeazy.run_eazy(data_path('eazy/input'), host_obj.name,
                     os.path.join(data_path('eazy/output'), 'logfile'))
    assert os.path.isfile(data_path('eazy/output/photz.zout'))
    # Read
    zgrid, pzi, prior = frbeazy.getEazyPz(
        -1, MAIN_OUTPUT_FILE='photz',
        OUTPUT_DIRECTORY=data_path('eazy/output'),
        CACHE_FILE='Same', binaries=None, get_prior=True)
    zphot, sig_zphot = frbeazy.eazy_stats(zgrid, pzi)
    assert np.isclose(zphot, 0.5929259648750858, rtol=1e-4)
    # Remove (a stray pytest.set_trace() here used to block this cleanup)
    shutil.rmtree(data_path('eazy'))
# Load the series, difference it, frame it as supervised learning, scale,
# fit a stateful LSTM, then walk-forward forecast the 12-step test split.
series = read_csv('shampoo-sales.csv', header=0, parse_dates=[0],
                  index_col=0, squeeze=True, date_parser=date_parser)
raw_values = series.values
diff_values = difference(raw_values, 1)
supervised = timeseries_to_supervised(diff_values, 1)
supervised_values = supervised.values
train, test = supervised_values[0:-12], supervised_values[-12:]
scaler, train_scaled, test_scaled = scale(train, test)
lstm_model = fit_lstm(train_scaled, 1, 3000, 4)
# Seed the network state by forecasting over the training data first.
train_reshaped = train_scaled[:, 0].reshape(len(train_scaled), 1, 1)
lstm_model.predict(train_reshaped, batch_size=1)
predicts = list()
for i in range(len(test_scaled)):
    X, y = test_scaled[i, 0:-1], test_scaled[i, -1]
    yhat = forecast_lstm(lstm_model, 1, X)
    yhat = invert_scale(scaler, X, yhat)
    yhat = inverse_difference(raw_values, yhat, len(test_scaled) + 1 - i)
    predicts.append(yhat)
def test_dust_correct():
    """Extinction correction for GMOS_S_r at E(B-V)=0.138 should compute."""
    correct = photom.extinction_correction('GMOS_S_r', 0.138)
    # TODO(review): pin an exact expected correction value here.
    assert correct is not None
def test(event):
    # Interactive hook: drops into pytest's debugger so the *event*
    # payload can be inspected by hand (the pause is the whole point).
    pytest.set_trace()
def test_login_not_required(selenium):
    """Anonymous users can browse public pages and register an account."""
    selenium.get(base_url)
    assert_body_text(
        selenium,
        "Collaboration",
        "Statistics",
        "About",
        "Register",
        "Login",
        "Launch Pad",
        "Data Sets",
        "Analyses",
        "Workflows",
    )
    selenium.find_element_by_link_text("Statistics").click()
    assert_body_text(selenium, "Users", "Groups", "Files", "Data Sets",
                     "Workflows", "Projects")
    selenium.find_element_by_link_text("About").click()
    assert_body_text(selenium, "Background", "Contact", "Funding", "Team",
                     "Most Recent Code for this Instance")
    # TODO: All sections are empty right now
    selenium.find_element_by_link_text("Register").click()
    assert_body_text(
        selenium,
        "Sign Up",
        "Register for an account",
        "Indicates a required field",
        "USERNAME",
        "FIRST NAME",
        "LAST NAME",
        "AFFILIATION",
        "EMAIL ADDRESS",
        "PASSWORD (AGAIN)",
    )
    selenium.find_element_by_name("username").send_keys("guest")
    selenium.find_element_by_xpath('//input[@type="submit"]').click()
    assert_body_text(
        selenium,
        "Please correct the errors below",
        "A user with that username already exists",
        "You must provide a First Name",
        "You must provide a Last Name",
        "You must provide an Affiliation",
        "This field is required",
    )
    stamp = str(time())  # Helps prevent collisions when running locally.
    selenium.find_element_by_name("username").send_keys(stamp)
    selenium.find_element_by_name("first_name").send_keys("first")
    selenium.find_element_by_name("last_name").send_keys("last")
    selenium.find_element_by_name("affiliation").send_keys("affiliation")
    selenium.find_element_by_name("email").send_keys("*****@*****.**" % stamp)
    selenium.find_element_by_name("password1").send_keys("password")
    selenium.find_element_by_name("password2").send_keys("password")
    selenium.find_element_by_xpath('//input[@type="submit"]').click()
    assert_body_text(selenium, "Registration complete")
def square(x):
    """A function where we want to pause and inspect things.

    NOTE(review): despite the name, this returns ``x * 2`` (double, not
    square) -- presumably fine for a debugger demo, but confirm intent.
    """
    import pytest; pytest.set_trace()  # intentional: the pause is the point
    return x * 2
def use_filter(number):
    """Render *number* through the template filter named by the current
    endpoint (the route name doubles as the filter name)."""
    filter_ = request.url_rule.endpoint
    template = '{{ filter_ }} {{ number|%s }}' % filter_
    return render_template_string(template, **locals())
def trace():
    """Enter pytest's pdb trace.

    Convenience wrapper so call sites don't need to import pytest.
    """
    pytest.set_trace()
def test_pytest_set_trace():
    # Demonstrates dropping into the pytest debugger mid-test (Python 2).
    print "foo"
    # press 'c' to complete test
    import pytest
    pytest.set_trace()
from selenium import webdriver
import time

# SECURITY(review): real credentials must not be committed to source --
# load them from environment variables or a config file instead.
driver = webdriver.Chrome('F:\\Cdriver\\chromedriver.exe')
driver.maximize_window()
# Fixed URL: was "http:www.facebook.com" (missing the // after the scheme).
driver.get("http://www.facebook.com")
driver.find_element_by_id("email").send_keys("*****@*****.**")
driver.find_element_by_id("pass").send_keys("CHINNU143")
driver.find_element_by_id("loginbutton").click()
driver.execute_script("window.scrollTo(0,2000)")
driver.find_element_by_link_text('Account Settings').click()
driver.find_element_by_xpath('//input[@value ="Log Out"]').click()
# time.sleep(15)
# driver.close()
def run_tests(self):
    """setuptools test-command entry point: run pytest and exit with its
    return code (a stray set_trace() here used to hang every test run)."""
    # import here, cause outside the eggs aren't loaded
    import pytest
    errno = pytest.main(self.pytest_args)
    sys.exit(errno)
def test_debugger():
    # Exercises dropping into pytest's debugger; continue with 'c'.
    a = 2
    pytest.set_trace()
    funct()
    assert a == 2
def test_counter_reset(self):
    """Resetting id 1 should clear its count."""
    counter = {1: 1, 2: 0}
    listener.reseter(1)
    # TODO(review): assert on the expected post-reset counter state.
def test_options_for_scan_check_for_single_args(monkeypatch, command,
                                                expected):
    """options() should parse a single-argument command line as expected."""
    monkeypatch.setattr(sys, "argv", command.split(" "))
    result, _ = scan_utils.options()
    assert result == expected
def debug(context):
    """Step hook that pauses execution in pytest's interactive debugger
    so *context* can be inspected by hand."""
    import pytest
    pytest.set_trace()
def test_analysis_deletion(selenium, total_analyses=TOTAL_ANALYSES):
    """Delete some analyses and make sure the ui updates properly"""
    login(selenium)
    # Create sample Data
    make_analyses_with_single_dataset(total_analyses, user)
    selenium.implicitly_wait(3)
    selenium.refresh()
    selenium.implicitly_wait(3)
    assert_text_within_id(
        selenium, "total-datasets", "{} data sets".format(1))
    assert_text_within_id(
        selenium, "total-analyses", "{} analyses".format(total_analyses)
    )
    while total_analyses:
        selenium.find_elements_by_class_name('analysis-delete')[0].click()
        selenium.implicitly_wait(3)
        wait_until_id_clickable(selenium, 'analysis-delete-button', 5).click()
        total_analyses -= 1
        wait_until_id_clickable(
            selenium, 'analysis-delete-close-button', 5).click()
        selenium.implicitly_wait(3)
        # Make sure the number of analyses indicator displays the correct info
        # (fixed: the selenium argument was missing from this call)
        assert_text_within_id(selenium, "analyses-indicator", total_analyses)
        if total_analyses <= 1:
            assert_text_within_id(
                selenium, "total-analyses", "{} analysis".format(
                    total_analyses)
            )
        else:
            assert_text_within_id(
                selenium, "total-analyses", "{} analyses".format(
                    total_analyses)
            )
    selenium.implicitly_wait(3)
    assert_text_within_id(
        selenium, "total-analyses", "{} analysis".format(total_analyses))
    selenium.find_elements_by_class_name('dataset-delete')[0].click()
    selenium.implicitly_wait(3)
    wait_until_id_clickable(selenium, 'dataset-delete-button', 5).click()
    selenium.implicitly_wait(3)
    assert_text_within_id(
        selenium, "total-datasets", "{} data sets".format(0))