Example #1
def mt_worker(chunk):
    """ Worker function that operates on single chunk (has access to global variables set in _setup """

    # read chunk of data and make sure it is 3-D: BxYxX
    data = MT_RFUNC(chunk)
    shape = data.shape
    if len(shape) == 2:
        data = data.reshape((1, shape[0], shape[1]))
        shape = data.shape

    # make output array for this chunk
    output = np.empty((MT_OUTBANDS, shape[1], shape[2]))
    output[:] = np.nan

    # only run on valid pixel signatures unless nodata set
    if MT_DROPNODATA:
        valid = np.all(~np.isnan(data), axis=0)
    else:
        valid = np.ones((shape[1], shape[2])).astype('bool')

    # run processing function
    try:
        output[:, valid] = MT_PFUNC(data[:, valid])
    except Exception, e:
        print "Error processing chunk %s: %s" % (' '.join(chunk), e)
        print traceback.format_exc()
        from nose.tools import set_trace
        set_trace()
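Note: the MT_RFUNC, MT_PFUNC, MT_OUTBANDS and MT_DROPNODATA globals used above are presumably installed in each worker process by the `_setup` initializer mentioned in the docstring. A minimal, self-contained sketch of that pool-initializer pattern (the names and toy functions below are illustrative assumptions, not from the source):

import numpy as np
from multiprocessing import Pool

MT_RFUNC = None
MT_PFUNC = None

def _setup(read_func, proc_func):
    # runs once in every worker process and populates the module-level globals
    global MT_RFUNC, MT_PFUNC
    MT_RFUNC, MT_PFUNC = read_func, proc_func

def _read_chunk(chunk):
    # stand-in for the real chunk reader
    return np.asarray(chunk, dtype=float)

def _process(data):
    # stand-in for the real per-pixel processing function
    return data * 2

def _worker(chunk):
    data = MT_RFUNC(chunk)
    return MT_PFUNC(data)

if __name__ == '__main__':
    with Pool(2, initializer=_setup, initargs=(_read_chunk, _process)) as pool:
        print(pool.map(_worker, [[1, 2], [3, 4]]))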
Example #2
 def test_packages(self):
     names = [pkg.name for pkg in model.Session.query(model.Package).all()]
     from nose.tools import set_trace; set_trace()
     assert_equal(names, [self.name])
     pkg = model.Package.by_name(self.name)
     assert pkg
     assert_equal(len(pkg.resources), self.num_resources_originally + 1)
Example #3
def test_get_spectrum_zeros():
    carr = np.zeros((10,6),dtype='F')
    x,sp = spect.get_spectrum(carr, 5)
    import pylab as pl
    pl.ion()
    pl.plot(x, sp)
    set_trace()
Example #4
    def test_splunk_registry(self):
        reg = scape.registry.Registry({
            'addc':
            scape.splunk.SplunkDataSource(
                splunk_service=self.service,
                metadata=scape.registry.TableMetadata({
                    'Source_Network_Address': {
                        'tags': ['source'],
                        'dim': 'ip',
                    },
                    'Source_Port': {
                        'tags': ['source'],
                        'dim': 'port',
                    },
                    'host': 'hostname:'
                }),
                index='addc',
                description="Test data source")
        })

        try:
            addc = reg['addc']
            with HTTMock(self.host.job_create_200, self.host.job_attr_200,
                         self.host.addc_results_200, self.host.control_200):
                for i, row in enumerate(addc.select('*').run().iter()):
                    self.assertTrue(row['host'].startswith('host'))
        except KeyboardInterrupt as err:
            set_trace()
Example #5
    def get_parma_2(self,casename):
        # Extension: exercise this interface with different parameters, covering both normal and abnormal cases
        # Optimization: fetch the parameters by reading them from Excel
        # Use casename to find the row number of the test case, then read the related data from that row
        set_trace()
        sht,cols_1 = open_excel_xlsx.open_excel_xlsx()
        case_raw = cols_1.index(casename)
        case_raw_tuple = sht.Range(sht.Cells(case_raw, 1), sht.Cells(case_raw, 11)).Value
        method = case_raw_tuple[0][1]
        base_url = case_raw_tuple[0][2]
        url = case_raw_tuple[0][2]+case_raw_tuple[0][3]
        params = case_raw_tuple[0][5]
        expect_code = case_raw_tuple[0][6]
        expect_json = case_raw_tuple[0][7]
        # method='get'
        # base_url = "http://httpbin.org/"
        # url = base_url + "get?"
        # params = {"name": "akui2", "email": r"*****@*****.**"}
        # expect_code = 200
        # expect_json = {u'args': {u'email': u'*****@*****.**', u'name': u'akui2'}}
        # Optimization: the method calls should be wrapped; encapsulate get, post, etc. in subclasses
        respon = requests.get(url=url, params=params)

        respon_code = respon.status_code
        respon_json = respon.json()

        # check whether the status codes are equal; check whether expect_json is a subset of respon_json
        # set_trace()
        return expect_code,respon_code,expect_json,respon_json
Example #6
def bubblesort_lines(lst):
    bubble = True
    imports = list(map(get_import_object_from_line, lst))
    lines = [
        ((get_sort_key_for_import(import_) if import_ is not None else None),
         import_, line) for import_, line in zip(imports, lst)
    ]
    while bubble:
        bubble = False
        for i in range(len(lines)):
            key, import_, line = lines[i]
            if key is None:
                continue
            try:
                j = next((j for j in range(i + 1, len(lines))
                          if lines[j][0] is not None), None)
            except Exception as e:
                from nose.tools import set_trace
                set_trace()
            if j is not None:
                if lines[i][0] > lines[j][0]:
                    bubble = True
                    lines[i], lines[j] = lines[j], lines[i]
                elif j == i + 1 and not is_same_section(
                        lines[i][1], lines[j][1]):
                    lines.insert(j, (None, None, '\n'))
                    bubble = True
                elif j == i + 2 and is_same_section(
                        lines[i][1], lines[j][1]) and lines[i + 1][2] == '\n':
                    # Extra newline between sections.
                    bubble = True
                    del lines[i + 1]
                    break
    return [l for (k, imp, l) in lines]
Example #7
        def test_status_report(self):
            order = self.status_report_factory.factory_order(dispatch_number=u'1012369182')
            status_report = self.status_report_factory.factory_status_report(order=order, date=datetime.datetime.now())
            self.api_client.make_status_report_request(status_report)
            from nose.tools import set_trace

            set_trace()
Example #8
def bubblesort_lines(lst):
    bubble = True
    imports = list(map(get_import_object_from_line, lst))
    lines = [((get_sort_key_for_import(import_) if import_ is not None else None), import_, line) for import_, line in zip(imports, lst)]
    while bubble:
        bubble = False
        for i in range(len(lines)):
            key, import_, line = lines[i]
            if key is None:
                continue
            try:
                j = next((j for j in range(i+1, len(lines)) if lines[j][0] is not None), None)
            except Exception as e:
                from nose.tools import set_trace; set_trace()
            if j is not None:
                if lines[i][0] > lines[j][0]:
                    bubble = True
                    lines[i], lines[j] = lines[j], lines[i]
                elif j == i + 1 and not is_same_section(lines[i][1], lines[j][1]):
                    lines.insert(j, (None, None, '\n'))
                    bubble = True
                elif j == i + 2 and is_same_section(lines[i][1], lines[j][1]) and lines[i + 1][2] == '\n':
                    # Extra newline between sections.
                    bubble = True
                    del lines[i + 1]
                    break
    return [l for (k, imp, l) in lines]
Example #9
    def process_identifier(self, identifier):
        # What is the correct medium?
        correct_medium = None
        lp = identifier.licensed_through
        for lpdm in lp.delivery_mechanisms:
            correct_medium = lpdm.delivery_mechanism.implicit_medium
            if correct_medium:
                break
        if not correct_medium and identifier.type == Identifier.OVERDRIVE_ID:
            content = self.overdrive.metadata_lookup(identifier)
            metadata = OverdriveRepresentationExtractor.book_info_to_metadata(
                content)
            correct_medium = metadata.medium

        if not correct_medium and identifier.type == Identifier.THREEM_ID:
            metadata = self.threem.bibliographic_lookup(identifier)
            correct_medium = metadata.medium

        if not correct_medium:
            set_trace()

        if lp.edition.medium != correct_medium:
            print "%s is actually %s, not %s" % (
                lp.edition.title, correct_medium, lp.edition.medium)
            lp.edition.medium = correct_medium or Edition.BOOK_MEDIUM
Example #10
 def test_packages(self):
     names = [pkg.name for pkg in model.Session.query(model.Package).all()]
     from nose.tools import set_trace; set_trace()
     assert_equal(names, [self.name])
     pkg = model.Package.by_name(self.name)
     assert pkg
     assert_equal(len(pkg.resources), self.num_resources_originally + 1)
Example #11
def test_foo():
    Foo.objects.all().delete()
    browser.open('/foo')
    set_trace()
    assert len(browser.document['#foos li']) == 0
    Foo.objects.create(dumbness=True)
    browser.open('/foo')
    assert len(browser.document['#foos li']) == 1
Example #12
def move_file(source_file, destination_file):
    try:
        os.rename(source_file, destination_file)
    except OSError as ex:
        print("Movement of the file %s failed" % source_file)
        set_trace()
    else:
        print("Successfully moved the file to %s" % destination_file)
Example #13
def copy_file(source_file, destination_file):
    try:
        shutil.copy2(source_file, destination_file)
    except OSError as ex:
        print("Copying of the file %s failed" % source_file)
        set_trace()
    else:
        print("Successfully copied the file to %s" % destination_file)
Example #14
 def test_api001_case(self):
     set_trace()
     # self.rr1 has no test_api001() attribute; it has 'test_api001_1' and 'test_api001_2' attributes
     expect_code, expect_result, res_code, res_result = self.rr1.test_api001(
     )
     assert_equal(expect_code, res_code, msg=u'请求错误%d' % res_code)
     assert_dict_contains_subset(expect_result,
                                 res_result,
                                 msg=u'预期结果错误%s' % res_result)
Example #15
    def edit(self, id=None):  # allow id=None to allow posting
        c.error = ''
        authorization_group = self._get_authgroup_by_name_or_id(id)
        if authorization_group is None:
            abort(404, '404 Not Found')
        am_authz = self.authorizer.am_authorized(c, model.Action.EDIT,
                                                 authorization_group)
        if not am_authz:
            abort(401, _('User %r not authorized to edit %r') % (c.user, id))

        is_admin = self.authorizer.is_sysadmin(c.user)

        if not 'save' in request.params:
            c.authorization_group = authorization_group
            c.authorization_group_name = authorization_group.name

            fs = ckan.forms.get_authorization_group_fieldset(
                is_admin=is_admin).bind(authorization_group)
            c.form = self._render_edit_form(fs)
            return render('authorization_group/edit.html')
        else:
            # id is the name (pre-edited state)
            c.authorization_group_name = id
            # needed because request is nested
            # multidict which is read only
            params = dict(request.params)
            c.fs = ckan.forms.get_authorization_group_fieldset()\
                .bind(authorization_group, data=params or None)
            try:
                self._update(c.fs, id, authorization_group.id)
                # do not use groupname from id as may have changed
                c.authorization_group = authorization_group
                c.authorization_group_name = authorization_group.name
            except ValidationException, error:
                fs = error.args[0]
                c.form = self._render_edit_form(fs)
                return render('authorization_group/edit.html')
            user = model.User.by_name(c.user)
            users = [model.User.by_name(name) for name in \
                     request.params.getall('AuthorizationGroup-users-current')]
            authorization_group.users = list(set(users))
            usernames = request.params.getall(
                'AuthorizationGroupUser--user_name')
            for username in usernames:
                if username:
                    usr = model.User.by_name(username)
                    if usr and usr not in authorization_group.users:
                        model.add_user_to_authorization_group(
                            usr, authorization_group, model.Role.READER)
            model.repo.commit_and_remove()
            from nose.tools import set_trace
            set_trace()

            h.redirect_to(controller='authorization_group',
                          action='read',
                          id=c.authorization_group_name)
Example #16
def create_directory(path):
    if os.path.exists(path):
        return
    try:
        os.makedirs(path)
    except OSError as ex:
        print("Creation of the directory %s failed" % path)
        set_trace()
    else:
        print("Successfully created the directory %s" % path)
Example #17
def set_trace():
    """
    Use this function instead of directly importing it from pdb. The test run
    time limit will be disabled and stdout restored (so the debugger actually
    works).
    """
    Timer.stop()

    from nose.tools import set_trace
    set_trace()
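For context, the nose.tools.set_trace that this wrapper calls essentially restores sys.stdout (which nose captures during test runs) and then starts pdb in the caller's frame. A rough, self-contained approximation of that behaviour (a sketch, not the library's exact source):

import pdb
import sys

def nose_style_set_trace():
    # restore the real stdout so the pdb prompt is visible despite output capture
    sys.stdout = sys.__stdout__
    # start the debugger in the caller's frame rather than inside this helper
    pdb.Pdb().set_trace(sys._getframe().f_back)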
Example #18
 def api001(self):
     set_trace()
     # url=1
     # params=2
     respon = requests.get(url=url, params=params)
     respon_code = respon.status_code
     respon_json = respon.json()
     # check whether the status codes are equal; check whether expect_json is a subset of respon_json
     # set_trace()
     return expect_code,respon_code,expect_json,respon_json
Example #19
def set_trace():
    """
    Use this function instead of directly importing it from pdb. The test run
    time limit will be disabled and stdout restored (so the debugger actually
    works).
    """
    Timer.stop()

    from nose.tools import set_trace
    set_trace()
Example #20
    def verify_all_adresses_list_works_fine(self):
        from nose.tools import set_trace
        set_trace()
        # address_list = self.my_account_page.check_visibility_of_addresses_lists()

        for heading, title in zip(MAIN_HEADING_LIST, TITLE_LIST):
            self.my_account_page.check_visibility_of_addresses_lists(
                title).click()
            self.assertEqual(
                self.my_account_page.check_heading_from_address_list(),
                heading)
Example #21
    def todo_single_iface_json(self, mock_stp_state, mock_is_bridgemem,
                               mock_symlink, mock_exec, mock_listdir,
                               mock_file, mock_read_oneline,
                               mock_os_path_exists):

        values10 = {
            ('/sys/class/net/swp3/brif', ): False,
            ('/sys/class/net/swp3', ): True,
            ('/sys/class/net/swp3/bonding', ): False,
            ('/sys/class/net/swp3/master/bonding', ): False,
            ('sys/class/net/swp3/brport/vlans', ): True,
            ('/sys/class/net/swp3/carrier', ): True,
            ('/sys/class/net/swp3.1/brport', ): True,
        }
        mock_os_path_exists.side_effect = mod_args_generator(values10)
        values4 = {
            ('/sys/class/net', ): ['swp3', 'swp3.1', 'swp3.2'],
            ('/sys/class/net/br0/brif', ): ['swp3'],
            ('/sys/class/net/br1/brif', ): ['swp3.1'],
            ('/sys/class/net/br2/brif', ): ['swp3.2']
        }
        mock_is_bridgemem.return_value = True
        mock_stp_state.return_value = '2'
        mock_listdir.side_effect = mod_args_generator(values4)
        values3 = {
            ('lspci -nn', ):
            '',
            ('/sbin/ethtool -S swp3', ):
            '',
            ('/sbin/mstpctl showall', ):
            open('tests/test_netshowlib/mstpctl_showall').read(),
            ('/usr/sbin/lldpctl -f xml', ):
            '<xml></xml>'
        }
        mock_exec.side_effect = mod_args_generator(values3)
        values = {
            ('bridge/stp_state', ): '2',
            ('brport/vlans', ): None,
            ('/sys/class/net/swp3/carrier', ): '1',
            ('/sys/class/net/swp3/speed', ): '1000',
            ('/sys/class/net/swp3/brport/vlans', ): None
        }
        mock_read_oneline.side_effect = mod_args_generator(values)
        values5 = {
            ('/sys/class/net/swp3/brport/bridge', ): 'br0',
            ('/sys/class/net/swp3.1/brport/bridge', ): 'br1',
            ('/sys/class/net/swp3.2/brport/bridge', ): 'br2'
        }
        mock_symlink.side_effect = mod_args_generator(values5)
        self.showint.single_iface = 'swp3'
        self.showint.use_json = True
        _output = self.showint.print_single_iface()
        from nose.tools import set_trace
        set_trace()
Example #22
def set_trace():
    """
    Use this function instead of directly importing it from pdb. The test run
    time limit will be disabled and stdout restored (so the debugger actually
    works).
    """
    if TEST_TIMELIMIT:
        signal.alarm(0)

    from nose.tools import set_trace
    set_trace()
Example #23
    def request_post_session(self, datas):
        # set_trace()
        session = requests.session()
        result = session.get("http://127.0.0.1:5000/login")
        soup = BeautifulSoup(result.text, "html.parser")
        inputs = soup.find_all("input", id="csrf_token")
        csrf_token = inputs[0].attrs["value"]
        data = {
            "csrf_token": csrf_token,
            "email": "*****@*****.**",
            "password": "******"
        }
        result = session.post("http://127.0.0.1:5000/login", data=data)
        if "Logged in successfully" in result.text:
            print '14:ok'
        else:
            print '14:error'
        cookies = result.cookies
        cookies_input = {}
        cookies_input[cookies.items()[0][0]] = cookies.items()[0][1]

        base_url = datas['base_url']
        api_command = datas['api_command']
        url = base_url + api_command
        data = datas['params']
        if data != '':
            data = eval(datas['params'])
            print 'ok'
        else:
            data = ''
            print 'nothing'
        # set_trace()
        data["csrf_token"] = csrf_token
        headers = datas['headers']
        headers = eval(
            headers
        )  # convert the unicode string into a dict, see https://www.cnblogs.com/OnlyDreams/p/7850920.html

        #-------------------
        # headers = {"User-Agent": "Mozilla/5.0 (Windows NT 6.1; rv:40.0) Gecko/20100101 Firefox/40.0","Accept": '"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"',}
        # data = {"csrf_token": csrf_token, "nickname": "xu123", "about_me": "20190911502:xu123"}

        # -----------------
        set_trace()
        res = session.post(url=url,
                           data=data,
                           headers=headers,
                           cookies=cookies_input)
        res_code = res.status_code
        if "Your changes have been saved" in res.text:
            res_result = "Your changes have been saved"
        else:
            print 'error'
        return res_code, res_result
Example #24
def test_get_spectrum_ones():
    carr = np.zeros((200,101),dtype='F')+1.0j
    sq_arr = np.ones((200, 200), dtype='f')
    npts = 101

    # compute the spectrum (mirrors the call in test_get_spectrum_zeros above)
    x, sp = spect.get_spectrum(carr, npts)
    approx = (np.pi*x)/npts*x[-1] + sp[0]

    assert np.allclose(sum(sp), sum(abs(carr.flatten())))
    import pylab as pl
    pl.ion()
    pl.plot(x, sp)
    set_trace()
Example #25
def test_simple_split():
    args = 'transform.py --props "{}" -t test/account_sales_rep -s "2018-03-01" -e "2018-03-02"'.format(
        CONFIG_FILE)
    args = args.split()
    with temp_sys_args(*args), mock_eravana_config():
        app = CronGDWTransform(props=self.config)
    from nose.tools import set_trace
    set_trace()
    app.run()
    chase_ract0010 = pd.DataFrame({'merchant_order_num': ['1', '2', '3', '4']})
    assert chase_ract0010['merchant_order_num'].equals(
        pd.Series(['1', '2', '3', '4']))
Example #26
 def test_itemset_learner(self):
     projects = get_all_projects()
     ant = projects['camel']
     ant_df = pd.concat([pd.read_csv(ant_file)
                         for ant_file in ant.data], ignore_index=True)
     ant_df = ant_df[ant_df.columns[1:]]
     ant_df.loc[ant_df[ant_df.columns[-1]] > 0, ant_df.columns[-1]] = 1
     X = ant_df[ant_df.columns[:-1]]
     y = ant_df[ant_df.columns[-1]]
     isl = ItemSetLearner()
     isl = isl.fit(X, y)
     frequent_items = isl.transform()
     set_trace()
Example #27
def test_rp():
    def inner(npts, nprofile, sigma, maxrad, useG):
        Y = np.linspace(0.0, 1.0, npts, endpoint=False)
        Y -= .5
        X = Y[:,np.newaxis]
        if useG:
            arr = np.exp(-(X**2 + Y**2) / (2. * sigma * sigma))
            rp_calc = np.exp(-X[:,0]**2 / (2. * sigma * sigma))
            center = np.where(arr == arr.max())
            center_x, center_y = float(center[0]) / npts, float(center[1]) / npts
        else:
            arr = np.ones((npts, npts))
            rp_calc = arr[:,0]
            center_x, center_y = 0.5, 0.5
        profile = np.empty(nprofile, dtype=np.double)
        rad_prof(arr=arr, scale=1.0,
                 center_x=center_x, center_y=center_y,
                 max_rad=maxrad, profile=profile)
        return arr, profile, rp_calc

    maxrad = 1.0
    nprofile = 100
    arr1, pro1, rp_calc1 = inner(100, nprofile, 0.05, maxrad, useG=True)
    arr2, pro2, rp_calc2 = inner(100, nprofile, 0.1, maxrad, useG=True)

    X1 = np.linspace(0, maxrad, nprofile)
    X_calc = np.linspace(0, maxrad, len(rp_calc1[50:]))

    # print arr1.sum(), pro1.sum()
    # print arr2.sum(), pro2.sum()

    # slopes = [(pro1[i] - pro1[0]) / (X1[i] - X1[0]) for i in range(1, len(pro1))]
    # print slopes

    import pylab as pl
    pl.ion()
    pl.plot(X1, pro1)
    pl.plot(X_calc, rp_calc1[50:])
    # from pprint import pprint
    # pprint(zip(X1, pro1))
    # print pro1[:50] / rp_calc1[50:]
    pl.figure()
    pl.plot(X1, pro2)
    pl.plot(X_calc, rp_calc2[50:])
    # print rp_calc2[49:] / pro2[:50]
    set_trace()


    ok_(np.allclose(np.sum(pro1), arr1.sum()))
    ok_(np.allclose(np.sum(pro2), arr2.sum()))
Example #28
 def test_build_tree(self):
     projects = get_all_projects()
     ant = projects['ant']
     test_df = pd.read_csv(ant.data[-1])
     train_df = pd.concat(
         [pd.read_csv(ant_file) for ant_file in ant.data[:-1]])
     train_df.loc[train_df[train_df.columns[-1]] > 0,
                  train_df.columns[-1]] = 1
     X_train = train_df[train_df.columns[1:]]
     X_test = test_df[test_df.columns[1:]]
     xtree = XTREE()
     xtree.fit(X_train)
     xtree.predict(X_test)
     set_trace()
Example #29
    def todo_single_iface_json(self,
                               mock_stp_state,
                               mock_is_bridgemem,
                               mock_symlink,
                               mock_exec,
                               mock_listdir,
                               mock_file,
                               mock_read_oneline,
                               mock_os_path_exists):

        values10 = {('/sys/class/net/swp3/brif',): False,
                    ('/sys/class/net/swp3',): True,
                    ('/sys/class/net/swp3/bonding',): False,
                    ('/sys/class/net/swp3/master/bonding',): False,
                    ('sys/class/net/swp3/brport/vlans',): True,
                    ('/sys/class/net/swp3/carrier',): True,
                    ('/sys/class/net/swp3.1/brport',): True,
                    }
        mock_os_path_exists.side_effect = mod_args_generator(values10)
        values4 = {('/sys/class/net',): ['swp3', 'swp3.1', 'swp3.2'],
                   ('/sys/class/net/br0/brif',): ['swp3'],
                   ('/sys/class/net/br1/brif',): ['swp3.1'],
                   ('/sys/class/net/br2/brif',): ['swp3.2']
                   }
        mock_is_bridgemem.return_value = True
        mock_stp_state.return_value = '2'
        mock_listdir.side_effect = mod_args_generator(values4)
        values3 = {('lspci -nn',): '',
                   ('/sbin/ethtool -S swp3', ): '',
                   ('/sbin/mstpctl showall',): open(
                       'tests/test_netshowlib/mstpctl_showall').read(),
                   ('/usr/sbin/lldpctl -f xml',): '<xml></xml>'}
        mock_exec.side_effect = mod_args_generator(values3)
        values = {('bridge/stp_state',): '2',
                  ('brport/vlans',): None,
                  ('/sys/class/net/swp3/carrier',): '1',
                  ('/sys/class/net/swp3/speed',): '1000',
                  ('/sys/class/net/swp3/brport/vlans',): None}
        mock_read_oneline.side_effect = mod_args_generator(values)
        values5 = {
            ('/sys/class/net/swp3/brport/bridge',): 'br0',
            ('/sys/class/net/swp3.1/brport/bridge',): 'br1',
            ('/sys/class/net/swp3.2/brport/bridge',): 'br2'
        }
        mock_symlink.side_effect = mod_args_generator(values5)
        self.showint.single_iface = 'swp3'
        self.showint.use_json = True
        _output = self.showint.print_single_iface()
        from nose.tools import set_trace; set_trace()
Example #30
 def edit(self, id=None): # allow id=None to allow posting
     c.error = ''
     authorization_group = self._get_authgroup_by_name_or_id(id)
     if authorization_group is None:
         abort(404, '404 Not Found')
     am_authz = self.authorizer.am_authorized(c, model.Action.EDIT, authorization_group)
     if not am_authz:
         abort(401, _('User %r not authorized to edit %r') % (c.user, id))
         
     is_admin = self.authorizer.is_sysadmin(c.user)
     
     if not 'save' in request.params:
         c.authorization_group = authorization_group
         c.authorization_group_name = authorization_group.name
         
         fs = ckan.forms.get_authorization_group_fieldset(is_admin=is_admin).bind(authorization_group)
         c.form = self._render_edit_form(fs)
         return render('authorization_group/edit.html')
     else:
         # id is the name (pre-edited state)
         c.authorization_group_name = id
         # needed because request is nested
         # multidict which is read only
         params = dict(request.params)
         c.fs = ckan.forms.get_authorization_group_fieldset()\
             .bind(authorization_group, data=params or None)
         try:
             self._update(c.fs, id, authorization_group.id)
             # do not use groupname from id as may have changed
             c.authorization_group = authorization_group
             c.authorization_group_name = authorization_group.name
         except ValidationException, error:
             fs = error.args[0]
             c.form = self._render_edit_form(fs)
             return render('authorization_group/edit.html')
         user = model.User.by_name(c.user)
         users = [model.User.by_name(name) for name in \
                  request.params.getall('AuthorizationGroup-users-current')]
         authorization_group.users = list(set(users))
         usernames = request.params.getall('AuthorizationGroupUser--user_name')
         for username in usernames:
             if username:
                 usr = model.User.by_name(username)
                 if usr and usr not in authorization_group.users:
                     model.add_user_to_authorization_group(usr, authorization_group, model.Role.READER)
         model.repo.commit_and_remove()
         from nose.tools import set_trace; set_trace()
         
         h.redirect_to(controller='authorization_group', action='read', id=c.authorization_group_name)
Example #31
class Test_api003(object):
    try:

        @classmethod
        def setUpClass(cls):
            logger.info(u'Test_api003:类开始...')

        @classmethod
        def tearDownClass(cls):
            logger.info(u'Test_api003:类结束...')

        def setUp(self):
            logger.info(u'用例开始...')

        def tearDown(self):
            logger.info(u'用例结束...')

        def test_api003(self):
            # set_trace()
            logger.info(u'失败用例:test_api003')
            assert_equal(1, 2, msg=u'1 !=2')  # when the case fails, the error info is recorded in the xml and html reports

        # print(1 / 0)  # to deliberately trigger an error, jump to the except block, catch the exception and log it
    except:  # catch all exceptions and record them in the log file (code errors, not test-case failures)
        set_trace()
        logger.info("如果存在错误,已记录在日志文件中")
        logger.exception(sys.exc_info())  # 将错误信息记录在日志文件中
Example #32
    def json_get_json(self, url, timeout=5, **kwargs):
        '''

        :param url: positional argument, required
        :param timeout: keyword argument with a default, optional, defaults to 5
        :param kwargs:
        :return:
        '''
        set_trace()
        # url = kwargs.get('url')
        params = kwargs.get('params')
        headers = kwargs.get('headers')
        res = requests.get(url=url,
                           timeout=timeout,
                           params=params,
                           headers=headers)
        return res
Example #33
def handshake_pre76(headers, base_response):
    """The websocket handshake as described in version 75 of the spec [ws75]_

    :param headers: The request headers from :func:`websocket_handshake`
    :param base_response: The headers common across different spec versions

    .. note:: ``base_response`` is provided by
        :func:`websocket_handshake`
    """
    try:
        base_response += ("WebSocket-Origin: %s\r\n"
                          "WebSocket-Location: %s\r\n\r\n" \
                                % (headers['Origin'], build_location_url(headers)))
    except KeyError:
        from nose.tools import set_trace; set_trace()
        raise HandShakeFailed("'Host' not in headers")
    return base_response
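For illustration, the completed pre-76 (hixie-75 style) response built above looks roughly like the snippet below; the header values and the ws:// location are invented for this sketch, and build_location_url is assumed to produce such a URL:

base_response = ("HTTP/1.1 101 Web Socket Protocol Handshake\r\n"
                 "Upgrade: WebSocket\r\n"
                 "Connection: Upgrade\r\n")
headers = {'Origin': 'http://example.com', 'Host': 'example.com'}
response = base_response + ("WebSocket-Origin: %s\r\n"
                            "WebSocket-Location: %s\r\n\r\n"
                            % (headers['Origin'], 'ws://example.com/chat'))
print(response)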
Example #34
    def test_short(self):
        url = 'http://127.0.0.1:8080/invocations'
        headers = {'Content-Type': 'text/csv'}
        data = (
            'tripduration,starttime,stoptime,start station id,start station name,start station latitude,start station longitude,end station id,end station name,end station latitude,end station longitude,bikeid,usertype,birth year,gender\n'
            '171,10/1/2015 00:00:02,10/1/2015 00:02:54,388,W 26 St & 10 Ave,40.749717753,-74.002950346,494,W 26 St & 8 Ave,40.74734825,-73.99723551,24302,Subscriber,1973.0,1\n'
            '593,10/1/2015 00:00:02,10/1/2015 00:09:55,518,E 39 St & 2 Ave,40.74780373,-73.97344190000001,438,St Marks Pl & 1 Ave,40.72779126,-73.98564945,19904,Subscriber,1990.0,1\n'
        )

        from nose.tools import set_trace
        set_trace()

        r = requests.post(url, data=data, headers=headers)
        assert r.status_code/100 == 2, \
                'got this instead, r.status_code ' + str(r.status_code)

        pass
Example #35
    def test_basic(self):
        from nose.tools import set_trace
        set_trace()

        csvdata = '10/1/2015 00:00:02,E 39 St & 2 Ave,Subscriber,1990,1'

        df = blc.hydrate_and_widen(csvdata)

        bundle, datasets, stations_df = bltu.make_basic_minimal_model()

        _, X_test = blc.df_to_np_for_clf(bundle,
                                         df,
                                         stations_df,
                                         labeled=False)
        clf = bundle['clf']

        path = clf.decision_path(X_test)
Example #36
    def test_upload_photo_bad(self):
        with self.testApp as app:
            r = app.post('/auth', 
                            data={'username': self.testUser.get('username'),
                                  'password': self.testUser.get('password') })

            photo = StringIO('some file contents')
            from nose.tools import set_trace; set_trace()
            resp = app.post('/api/profile/photo', 
                            data={'photo': (photo, 'test_photo_upload.txt')})
            
            assert 'phone_verify_id' in session
            assert 'success' in resp.data
            
            phone_verify_id = session['phone_verify_id']
            token = cdw.phoneverifications.with_id(phone_verify_id).token
            r = app.post('/verify/code', data={'code':token})    
            assert 'verified_phone' in session
            assert 'success' in r.data
Example #37
    def test_api_profile_register(self):
        params = {
            "username": "******",
            "email": "*****@*****.**",
            "phoneNumber": "9015551212",
            "password": "******"
        }
        url = '/api/register'

        resp = self.testApp.post(url,
                                 data=json.dumps(params),
                                 content_type="application/json")

        self.assert_ok_json(resp)
        self.assertTrue('Set-Cookie' in resp.headers.keys())
        cookie = resp.headers.get('Set-Cookie')
        self.assertTrue("login="******"Unable to find login cookie in response")
        from nose.tools import set_trace
        set_trace()
Example #38
    def enter_time(self, **kwargs):
        """

        :param kwargs: * 'fist_value' carries the current hour, fetched from the dict
                       * 'second_value' carries the current minute, fetched from the dict
        :return:
        """
        from nose.tools import set_trace
        set_trace()
        hr = self.driver.find_element_by_css_selector(
            "input[aria-label='Hour']")
        hr.click()
        # hr is hour field
        hr.send_keys(Keys.BACKSPACE)
        hr.send_keys(kwargs.get('fist_value'))
        mn = self.driver.find_element_by_css_selector(
            "input[aria-label='Minute']")
        # min is minute field
        mn.send_keys(Keys.BACKSPACE)
        mn.send_keys(kwargs.get('second_value'))
Example #39
    def test_init_state_equal(self):
        """test that after init CSV and JSON generators have equal 'native_language', 'languages', 'templates', 'tables' attrubytes.

        This is the only test needed because if state after init is the same then
        behaviour is the same.

        """
        json_generator = self.TestJSONGenerator()
        csv_generator = self.TestCSVGenerator()

        for attr_name in ['native_language', 'languages', 'templates', 'tables']:
            try:
                json_attr = getattr(json_generator, attr_name)
                csv_attr = getattr(csv_generator, attr_name)
                if isinstance(json_attr, list):
                    self.assertItemsEqual(csv_attr, json_attr)
                else:
                    self.assertEqual(csv_attr, json_attr)
            except Exception:
                from nose.tools import set_trace; set_trace()
                raise
Example #40
def _test_reeb3():
    surf_net = _cp.netx.DiGraph()
    surf_net._g = {
            (5, 15): [(22, 28), (6, 15)],
            (5, 31): [(6, 25), (26, 6), (10, 2)],
            (6, 15): [(27, 0), (5, 15), (6, 16)],
            (6, 16): [(6, 25), (14, 9), (6, 15)],
            (6, 25): [(27, 0), (21, 18), (6, 16), (5, 31)],
            (10, 2): [(21, 18), (27, 0), (5, 31), (16, 3)],
            (14, 9): [(27, 0), (21, 18), (16, 3), (6, 16)],
            (16, 3): [(22, 28), (14, 9), (26, 6), (10, 2)],
            (21, 18): [(6, 25), (22, 28), (14, 9), (26, 6), (10, 2)],
            (22, 28): [(21, 18), (27, 0), (5, 15), (16, 3)],
            (26, 6): [(27, 0), (21, 18), (16, 3), (5, 31)],
            (27, 0): [(6, 25), (22, 28), (14, 9), (26, 6), (6, 15), (10, 2)],
            }
    crit_pts =  {
            'passes': set([(6, 15), (6, 25), (10, 2), (14, 9), (22, 28), (26, 6)]),
            'peaks': set([(21, 18), (27, 0)]),
            'pits': set([(5, 15), (5, 31), (6, 16), (16, 3)]),
            }
    node2h_map = {
            (5, 31): (-0.014356736424827819, (5, 31)),
            (5, 15): (-0.027839962424853988, (5, 15)),
            (6, 25): (-0.0055548455026826482, (6, 25)),
            (6, 15): (-0.027593845591386536, (6, 15)),
            (6, 16): (-0.027642410602253194, (6, 16)),
            (10, 2): (-0.0075859636696798422, (10, 2)),
            (14, 9): (-0.0072652858280034555, (14, 9)),
            (16, 3): (-0.012332851686022498, (16, 3)),
            (21, 18): (0.039444128294746132, (21, 18)),
            (22, 28): (0.0016904116601130521, (22, 28)),
            (26, 6): (0.0064175915669918903, (26, 6)),
            (27, 0): (0.011876681723300701, (27, 0)),
            }
    node2h = lambda n: node2h_map[n]
    reeb_gr = _cp.get_reeb_graph(surf_net, crit_pts, node2h)
    set_trace()
Example #41
    def insert_rows(self, table_name, rows):
        if table_name.lower() not in self.insert_statements.keys():
            raise Exception(table_name, 'Do not know how to insert this type of record')

        connection = odbc.connect(self.connection_string)
        cursor = connection.cursor()

        command = self.insert_statements[table_name.lower()]

        i = 1
        start = 0
        end = self.batch_size
        try:
            print 'total rows to insert {}'.format(len(rows))

            while start < len(rows):
                batched_rows = rows[start:end]

                cursor.executemany(command, batched_rows)
                connection.commit()

                i = i + 1
                start = end
                end = i * self.batch_size + 1
        except:
            # import pprint
            # pp = pprint.PrettyPrinter(indent=4)
            # pp.pprint(batched_rows)

            print '{} {}-{}'.format(table_name, start, end)

            from nose.tools import set_trace
            set_trace()

            raise
        finally:
            cursor.close()
            connection.close()
Example #42
    def process_identifier(self, identifier):
        # What is the correct medium?
        correct_medium = None
        lp = identifier.licensed_through
        for lpdm in lp.delivery_mechanisms:
            correct_medium = lpdm.delivery_mechanism.implicit_medium
            if correct_medium:
                break
        if not correct_medium and identifier.type==Identifier.OVERDRIVE_ID:
            content = self.overdrive.metadata_lookup(identifier)
            metadata = OverdriveRepresentationExtractor.book_info_to_metadata(content)
            correct_medium = metadata.medium

        if not correct_medium and identifier.type==Identifier.THREEM_ID:
            metadata = self.threem.bibliographic_lookup(identifier)
            correct_medium = metadata.medium

        if not correct_medium:
            set_trace()

        if lp.edition.medium != correct_medium:
            print "%s is actually %s, not %s" % (lp.edition.title, correct_medium, lp.edition.medium)
            lp.edition.medium = correct_medium or Edition.BOOK_MEDIUM
Example #43
def projector_planes(proj):
    """
    Function: projector_planes
    Generates the planes representing each row and column of the projector

    Parameters:
    proj - *<DLPProjector>* The projector to generate the planes of

    Returns:
    *Tuple* in the form (vertical_planes, horizontal_planes). vertical_planes will have shape
    (width, 4) and horizontal_planes will have shape (height, 4).  Each plane is defined
    by the coefficients [a, b, c, d], where a*x + b*y + c*z = d
    """
    # generate pixel rays in the projector frame
    proj_pixels_proj_frame = pixel_rays(proj)
    from nose.tools import set_trace; set_trace()
    # translate and rotate the rays into the global frame
    proj_pixels = to_global_frame(proj_pixels_proj_frame, proj)

    # get projector location in the global frame
    proj_pose = to_global_frame([0, 0, 0], proj)

    # add projector location to the vertical points
    vertical_shape = (proj.resolution[0], 1, 3)
    proj_points_vertical = np.concatenate((proj_pixels, np.ones(vertical_shape) * proj_pose), axis=1)

    # calculate the vertical planes
    proj_planes_vertical = fit_plane(proj_points_vertical)

    # add projector location to the horizontal points
    horizontal_shape = (1, proj.resolution[1], 3)
    proj_points_horizontal = np.concatenate((proj_pixels, np.ones(horizontal_shape) * proj_pose), axis=0)

    # calculate the horizontal planes
    proj_planes_horizontal = fit_plane(np.transpose(proj_points_horizontal, (1, 0, 2)))

    return proj_planes_vertical, proj_planes_horizontal
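The [a, b, c, d] plane convention documented above (a*x + b*y + c*z = d) is what a ray/plane intersection routine such as the line_plane_intersection used in Example #49 would consume. A self-contained sketch of that intersection under this convention (an illustrative assumption, not the project's actual implementation):

import numpy as np

def intersect_ray_plane(origin, direction, plane):
    # plane is [a, b, c, d] with a*x + b*y + c*z = d; origin and direction define the ray
    normal, d = np.asarray(plane[:3], dtype=float), float(plane[3])
    origin = np.asarray(origin, dtype=float)
    direction = np.asarray(direction, dtype=float)
    denom = normal.dot(direction)
    if abs(denom) < 1e-12:
        return None  # the ray is parallel to the plane
    t = (d - normal.dot(origin)) / denom
    return origin + t * direction

# the plane z = 2 is [0, 0, 1, 2]; a ray from the origin along +z hits it at (0, 0, 2)
print(intersect_ray_plane([0, 0, 0], [0, 0, 1], [0, 0, 1, 2]))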
Example #44
def console():
  from nose.tools import set_trace; set_trace()
  import code; code.interact(local=locals())
Example #45
 def debug(self):
   from nose.tools import set_trace; set_trace()
Example #46
import nose
from nose.tools import set_trace; set_trace()


def run_tests():
    nose.main()


if __name__ == "__main__":
    run_tests()
Example #47
# -------------------------------------------------------------------------------
# _*_encoding:utf-8_*_
# Name:        $[ActiveDoc-Name]
# Purpose:
#
# Author:      $[UserName]
#
# Created:     $[DateTime-'DD/MM/YYYY'-DateFormat]
# Copyright:   (c) $[UserName] $[DateTime-'YYYY'-DateFormat]
# Licence:     <your licence>
# -------------------------------------------------------------------------------
# !/usr/bin/env python
# coding=utf-8
import sys
reload(sys)
sys.setdefaultencoding('utf8')
from nose.tools import set_trace
from nose.tools import assert_equal
import json
set_trace()
a = {'a': 'aa'}
b = {'b': 'bb'}
assert_equal(a, b, msg=u'a:%s;b:%s' % (json.dumps(a), json.dumps(b)))
Example #48
import kestrel
import json

c = kestrel.Client(['localhost:22133'])

json_data = open('/home/diana/ami/amilab-basis/test/measurements_sample.json')
message = json.load(json_data)
from nose.tools import set_trace; set_trace()
c.add('measurements', json.dumps(message))
Example #49
def extract_point_cloud(initial_images, vertical_stripe_images, horizontal_stripe_images, min_contrast=0.2):
    """
    Function: extract_point_cloud
    Takes a series of images with gray codes projected on them and produces a point cloud.

    Parameters:

    initial_images - *[<Image>]* image of an all white projection followed by an all black projection
    vertical_stripe_images - *[<Image>]* Images with vertical stripe projections as described in <gray_code_estimates>
    horizontal_stripe_images - *[<Image>]* Images with horizontal stripe projections as described in <gray_code_estimates>
    
    Returns:
    *<PointCloud>* A PointCloud object with the scan data
    """
    if len(initial_images) % 2 != 0:
        raise Exception("For initial_images, must have an all white projection followed " +
                        "by an all black projection")

    # alias projector and camera objects
    proj = initial_images[0].patterns[0].projected_patterns[0][1]
    cam = initial_images[0].camera

    # get camera pixel rays
    cam_rays = to_global_frame(pixel_rays(cam), cam)

    # get cam location
    cam_pose = to_global_frame([0, 0, 0], cam)

    # get row projection indices from vertical stripe patterns
    gray_code_row, pixel_mask_row = gray_code_estimates(vertical_stripe_images, min_contrast)
    
    # get column projection indices from horizontal stripe patterns
    gray_code_col, pixel_mask_col = gray_code_estimates(horizontal_stripe_images, min_contrast)

    if gray_code_row is None and gray_code_col is None:
        raise Exception("Must have images to scan from!")

    # combine pixel masks from columns and rows (i.e., if a pixel was invalid
    # from the column or row data, make it invalid for both)
    if pixel_mask_row is not None:
        if pixel_mask_col is not None:
            pixel_mask = pixel_mask_row & pixel_mask_col
        else:
            pixel_mask = pixel_mask_row
    else:
        pixel_mask = pixel_mask_col        

    pixel_mask = pixel_mask_row
    # also invalidate pixels if the difference between the original first and second
    # images (white projection and black projection) doesn't exceed the contrast
    # ratio
    gray_1 = cv2.cvtColor(initial_images[0].data, cv2.COLOR_RGB2GRAY)
    gray_2 = cv2.cvtColor(initial_images[1].data, cv2.COLOR_RGB2GRAY)
    pixel_mask[np.abs(gray_1.astype(np.int16) - gray_2.astype(np.int16)) <= 255 * min_contrast] = False

    # also invalidate a pixel if its calculated gray code row is greater than or equal to
    # the projector's row count, or if its calculated gray code column is greater than
    # or equal to the projector's column count
    if gray_code_row is not None:
        pixel_mask[gray_code_row >= proj.resolution[0]] = False
    if gray_code_col is not None:
        pixel_mask[gray_code_col >= proj.resolution[1]] = False

    # get plane equations for every projector row and column
    proj_planes_vert, proj_planes_horz = projector_planes(proj)
    from nose.tools import set_trace; set_trace()
    # TODO: vectorize this next part
    # calculate all the points
    points = []
    for i in range(cam.resolution[0]):
        for j in range(cam.resolution[1]):
            if pixel_mask[i,j]:
                if gray_code_row is not None:
                    p_row = line_plane_intersection(cam_pose, cam_rays[i,j],
                                                    proj_planes_vert[gray_code_row[i,j]])
                else:
                    p_row = None
                    
                if gray_code_col is not None:
                    p_col = line_plane_intersection(cam_pose, cam_rays[i,j],
                                                    proj_planes_horz[gray_code_col[i,j]])
                else:
                    p_col = None
                    
                if p_row is not None:
                    points.append(p_row)

    p = PointCloud()
    p.from_array(np.array(points, dtype=np.float32))
    return p
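The gray_code_estimates helper referenced in the docstring is not shown here; for background, decoding a reflected gray code into a plain binary index works as in this generic sketch (an illustration, not the project's function):

def gray_to_index(bits):
    # bits: iterable of 0/1 values, most significant bit first, as recovered from the stripe patterns
    value = 0
    index = 0
    for b in bits:
        value ^= b            # running XOR converts gray code to binary
        index = (index << 1) | value
    return index

# the 3-bit gray code 1 1 0 corresponds to binary 100, i.e. index 4
print(gray_to_index([1, 1, 0]))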
Example #50
 def test_publish_as_configured(self):
     data = _randstr()
     nt.set_trace()
     ps.publish(data)
     mockpn.publish.assert_called_once()
Example #51
# import the class that wraps HTTP requests
from request.request_method import Request_method
# set_trace()
# the specific Environment parameters can be set as needed
allure.environment(app_package='com.mobile.fm')
allure.environment(app_activity='com.mobile.fm.activity')
allure.environment(device_name='aad464')
allure.environment(platform_name='Android')
p1 = sys.getdefaultencoding()
import sys
reload(sys)
sys.setdefaultencoding('utf8')
p2 = sys.getdefaultencoding()
print p1
print p2
'''
Problem:
On Windows 10, running the following code from cmd
set_trace()
logger.info(u'Test_api006接口开始测试...')
raises: Failure: IOError ([Errno 0] Error) ... ERROR
Workarounds:
Method 1: add the following code
import sys
reload(sys)
sys.setdefaultencoding('utf8')
Note: method 1 handles most cases; when it does not, call set_trace after logger.info(u'Test_api006接口开始测试...') rather than before it
Method 2: do not put a set_trace() breakpoint before a logger.info call that contains Chinese text
Method 3: keep Chinese text out of logger.info messages

'''
Example #52
 def private_network_delete(self):
     set_trace()
     self.delete_network(self.private_network_id)
     self.delete_network(self.private_network_id1)
     self.delete_network(self.private_network_id2)
Example #53
 def test_division_0_2(self):
     """测试被除数为0
     """
     set_trace()
     assert_raises(ZeroDivisionError, self.calc.division, 24, 0)
Example #54
def test_model():
    app, db, admin = setup()

    Book, Publisher, Author = create_models(db)

    view = BookModelView(Book)

    admin.add_view(view)

    eq_(view.model, Book)
    eq_(view.name, 'Book')
    eq_(view.endpoint, 'bookview')

    eq_(view._primary_key, 'id')

    # None of the complex fields can be sorted on
    ok_('title' in view._sortable_columns)

    # Verify form
    eq_(view._create_form_class.title.field_class, fields.StringField)
    eq_(view._create_form_class.tags.field_class, fafields.InlineFieldList)
    eq_(view._create_form_class.authors.field_class, fafields.InlineFieldList)
    eq_(view._create_form_class.publishers.field_class, ModelSelectMultipleField)
    eq_(view._create_form_class.info.field_class, DictField)

    publisher = Publisher(refid=1, 
                          active=True)
    publisher.save()
    author = Author(name='name', 
                    email='*****@*****.**')

    # Make some test clients
    with app.test_client() as client:
        rv = client.get('/admin/bookview/')
        eq_(rv.status_code, 200)

        rv = client.get('/admin/bookview/new/')
        eq_(rv.status_code, 200)

        rv = client.post('/admin/bookview/new/',
                         data=dict(title= 'test',
                                   tags= ["a,b,c"],
                                   publishers= [str(publisher.id)],
                                   authors= [author]
                              ))
        eq_(rv.status_code, 302)

        from nose.tools import set_trace; set_trace()
        book = Book.objects.first()
        ok_(book.title == 'test')

        ok_(book.tags == ['a','b','c'])
        ok_(book.publishers[0].id == publisher.id)
        ok_(book.authors[0].name == 'name')
        ok_(book.authors[0].email == '*****@*****.**')

        # Update the list of tags
        rv = client.post('/admin/bookview/edit/?id=%s' % book.id,
                         data={'tags': ['c','b','a']})
        eq_(rv.status_code, 200)
        edbook = Book.objects().with_id(book.id)
        ok_(book.tags == ['c','b','a'])