Beispiel #1
0
def test_init():
    """Check ArrayProxy construction, read-only shape, and data round-trips."""
    bio = BytesIO()
    shape = [2, 3, 4]
    dtype = np.int32
    arr = np.arange(24, dtype=dtype).reshape(shape)
    # Data begins at offset 16 in the file-like (matches FunkyHeader's offset)
    bio.seek(16)
    # tobytes() replaces tostring(), which was deprecated and then removed
    # from numpy (1.23+); output bytes are identical.
    bio.write(arr.tobytes(order='F'))
    hdr = FunkyHeader(shape)
    ap = ArrayProxy(bio, hdr)
    assert_true(ap.file_like is bio)
    assert_equal(ap.shape, shape)
    # shape should be read only
    assert_raises(AttributeError, setattr, ap, 'shape', shape)
    # Get the data
    assert_array_equal(np.asarray(ap), arr)
    # Check we can modify the original header without changing the ap version
    hdr.shape[0] = 6
    assert_not_equal(ap.shape, shape)
    # Data stays the same, also
    assert_array_equal(np.asarray(ap), arr)
    # C order also possible
    bio = BytesIO()
    bio.seek(16)
    bio.write(arr.tobytes(order='C'))
    ap = CArrayProxy(bio, FunkyHeader((2, 3, 4)))
    assert_array_equal(np.asarray(ap), arr)
    # Illegal init: second argument must be a header, not an arbitrary object
    assert_raises(TypeError, ArrayProxy, bio, object())
Beispiel #2
0
def test_TimeArray_bool():
    """Comparing TimeArrays in different units compares converted values."""
    seconds = ts.TimeArray([1, 2, 3], time_unit="s")
    millis = ts.TimeArray([1000, 2000, 3000], time_unit="ms")
    all_true = np.ones(seconds.shape, dtype=bool)
    # 1 s == 1000 ms, etc., so the arrays compare equal element-wise
    npt.assert_equal(seconds, millis)
    npt.assert_equal(all_true, seconds == millis)
    # The comparison result is a plain boolean array, not a TimeArray
    nt.assert_not_equal(type(seconds == millis), ts.TimeArray)
 def test_if_select_with_namedplace_city_province_country_is_ok(self):
     """Geocode one named place via the SQL API and expect a geometry back."""
     # Build the geocoding query for one row of the test table; the api_key
     # is carried in the request's query string after the SQL text.
     query = "SELECT cdb_geocode_namedplace_point(city,province,country) " \
         "as geometry FROM {0} LIMIT 1&api_key={1}".format(
         self.env_variables['table_name'],
         self.env_variables['api_key'])
     geometry = IntegrationTestHelper.execute_query(self.sql_api_url, query)
     # A successful geocode returns a non-null geometry value
     assert_not_equal(geometry['geometry'], None)
Beispiel #4
0
def test_posterior_transitions_w_training():
	"""Training on sequences must break the model's initial transition ties."""
	sequences = [ list(x) for x in ( 'A', 'ACT', 'GGCA', 'TACCTGT' ) ]
	# Map state names to row/column indices of the dense transition matrix
	indices = { state.name: i for i, state in enumerate( model.states ) }

	transitions = model.dense_transition_matrix()
	i0, i1, i2 = indices['I0'], indices['I1'], indices['I2']
	d1, d2, d3 = indices['D1'], indices['D2'], indices['D3']
	m1, m2, m3 = indices['M1'], indices['M2'], indices['M3']

	# Before training, corresponding transitions across positions are tied
	assert_equal( transitions[d1, i1], transitions[d2, i2] )
	assert_equal( transitions[i0, i0], transitions[i1, i1] )
	assert_equal( transitions[i0, i0], transitions[i2, i2] )
	assert_equal( transitions[i0, m1], transitions[i1, m2] )
	assert_equal( transitions[d1, d2], transitions[d2, d3] )
	assert_equal( transitions[i0, d1], transitions[i1, d2] )
	assert_equal( transitions[i0, d1], transitions[i2, d3] )

	# NOTE: mutates the module-level `model` in place
	model.train( sequences )
	transitions = model.dense_transition_matrix()

	# After training, those ties should have been broken by the data
	assert_not_equal( transitions[d1, i1], transitions[d2, i2] )
	assert_not_equal( transitions[i0, m1], transitions[i1, m2] )
	assert_not_equal( transitions[d1, d2], transitions[d2, d3] )
	assert_not_equal( transitions[i0, d1], transitions[i1, d2] )
	assert_not_equal( transitions[i0, d1], transitions[i2, d3] )
def test_customer_bank_accounts_create_new_idempotency_key_for_each_call():
    """Each create() call must send a distinct Idempotency-Key header."""
    fixture = helpers.load_fixture('customer_bank_accounts')['create']
    helpers.stub_response(fixture)
    # Issue the same create twice against the stubbed endpoint
    helpers.client.customer_bank_accounts.create(*fixture['url_params'])
    helpers.client.customer_bank_accounts.create(*fixture['url_params'])
    first_key = responses.calls[0].request.headers.get('Idempotency-Key')
    second_key = responses.calls[1].request.headers.get('Idempotency-Key')
    assert_not_equal(first_key, second_key)
Beispiel #6
0
def test_fetch_atlas_schaefer_2018():
    """Exercise the Schaefer 2018 atlas fetcher over all valid parameters."""
    rois_options = [100, 200, 300, 400, 500, 600, 800, 1000]
    networks_options = [7, 17]
    resolution_options = [1, 2]

    # Out-of-range parameter values must be rejected up front
    assert_raises(ValueError, atlas.fetch_atlas_schaefer_2018, n_rois=44)
    assert_raises(ValueError, atlas.fetch_atlas_schaefer_2018, yeo_networks=10)
    assert_raises(ValueError, atlas.fetch_atlas_schaefer_2018, resolution_mm=3)

    combos = itertools.product(rois_options, networks_options,
                               resolution_options)
    for n_rois, yeo_networks, resolution_mm in combos:
        data = atlas.fetch_atlas_schaefer_2018(n_rois=n_rois,
                                               yeo_networks=yeo_networks,
                                               resolution_mm=resolution_mm,
                                               data_dir=tst.tmpdir,
                                               verbose=0)
        assert_not_equal(data.description, '')
        assert_true(isinstance(data.maps, _basestring))
        assert_true(isinstance(data.labels, np.ndarray))
        assert_equal(len(data.labels), n_rois)
        # Label names embed the network count, e.g. "7Networks..."
        expected_prefix = "{}Networks".format(yeo_networks)
        assert_true(data.labels[0].astype(str).startswith(expected_prefix))
        img = nibabel.load(data.maps)
        assert_equal(img.header.get_zooms()[0], resolution_mm)
        # The atlas volume must contain exactly the labels 0..n_rois
        assert_true(np.array_equal(np.unique(img.dataobj),
                                   np.arange(n_rois + 1)))
def test_mx_lookup(ld, cmx):
    """validate_address accepts an address only when an MX server answers."""
    # Each case: (DNS lookup result, connectable server, expect valid?)
    cases = [
        # has MX, has MX server
        (['mx1.fake.mailgun.com', 'mx2.fake.mailgun.com'],
         'mx1.fake.mailgun.com', True),
        # has fallback A, has MX server
        (['domain.com'], 'domain.com', True),
        # has MX, no server answers
        (['mx.example.com'], None, False),
        # no MX
        ([], None, False),
    ]
    for lookup_result, server, should_validate in cases:
        ld.return_value = lookup_result
        cmx.return_value = server
        addr = address.validate_address('*****@*****.**')
        if should_validate:
            assert_not_equal(addr, None)
        else:
            assert_equal(addr, None)
Beispiel #8
0
def test_fetch_localizer_calculation_task():
    """Fetch localizer calculation-task contrast maps from mocked files."""
    local_url = "file://" + tst.datadir
    subject_ids = np.asarray(['S%2d' % i for i in range(94)])
    subject_ids = subject_ids.view(dtype=[('subject_id', 'S3')])
    tst.mock_fetch_files.add_csv('cubicwebexport.csv', subject_ids)
    tst.mock_fetch_files.add_csv('cubicwebexport2.csv', subject_ids)

    # Disabled: cannot be tested without actually fetching covariates CSV file
    # Default call: a single subject is returned
    dataset = func.fetch_localizer_calculation_task(data_dir=tst.tmpdir,
                                                    url=local_url,
                                                    verbose=0)
    assert_true(isinstance(dataset.ext_vars, np.recarray))
    assert_true(isinstance(dataset.cmaps[0], _basestring))
    assert_equal(dataset.ext_vars.size, 1)
    assert_equal(len(dataset.cmaps), 1)

    # Requesting 20 subjects yields 20 covariate rows and 20 contrast maps
    dataset = func.fetch_localizer_calculation_task(n_subjects=20,
                                                    data_dir=tst.tmpdir,
                                                    url=local_url,
                                                    verbose=0)
    assert_true(isinstance(dataset.ext_vars, np.recarray))
    assert_true(isinstance(dataset.cmaps[0], _basestring))
    assert_equal(dataset.ext_vars.size, 20)
    assert_equal(len(dataset.cmaps), 20)
    assert_not_equal(dataset.description, '')
Beispiel #9
0
  def test_get_collections(self):
    """Role listing is denied without a group filter; '*' wildcard succeeds."""
    resp = self.db.list_sentry_roles_by_group() # Non Sentry Admin can do that
    # Unfiltered call: expect a non-zero (error) status mentioning denial
    assert_not_equal(0, resp.status.value, resp)
    assert_true('denied' in resp.status.message, resp)

    # Wildcard group filter: expect success (status value 0)
    resp = self.db.list_sentry_roles_by_group(groupName='*')
    assert_equal(0, resp.status.value, resp)
Beispiel #10
0
def test_calculate_scale():
    """Test special cases in (slope, intercept) scale calculation for casts."""
    npa = np.array
    SIAW = SlopeInterArrayWriter
    SAW = SlopeArrayWriter
    # Offset handles scaling when it can
    aw = SIAW(npa([-2, -1], dtype=np.int8), np.uint8)
    assert_equal(get_slope_inter(aw), (1.0, -2.0))
    # Sign flip handles these cases
    aw = SAW(npa([-2, -1], dtype=np.int8), np.uint8)
    assert_equal(get_slope_inter(aw), (-1.0, 0.0))
    aw = SAW(npa([-2, 0], dtype=np.int8), np.uint8)
    assert_equal(get_slope_inter(aw), (-1.0, 0.0))
    # But not when min magnitude is too large (scaling mechanism kicks in)
    aw = SAW(npa([-510, 0], dtype=np.int16), np.uint8)
    assert_equal(get_slope_inter(aw), (-2.0, 0.0))
    # Or for floats (attempts to expand across range)
    aw = SAW(npa([-2, 0], dtype=np.float32), np.uint8)
    assert_not_equal(get_slope_inter(aw), (-1.0, 0.0))
    # Case where offset handles scaling
    aw = SIAW(npa([-1, 1], dtype=np.int8), np.uint8)
    assert_equal(get_slope_inter(aw), (1.0, -1.0))
    # Can't work for no offset case
    assert_raises(WriterError, SAW, npa([-1, 1], dtype=np.int8), np.uint8)
    # Offset trick can't work when max is out of range
    aw = SIAW(npa([-1, 255], dtype=np.int16), np.uint8)
    slope_inter = get_slope_inter(aw)
    assert_not_equal(slope_inter, (1.0, -1.0))
Beispiel #11
0
def test_fetch_adhd():
    """Fetch 12 ADHD subjects from mocked local files and verify counts."""
    local_url = "file://" + tst.datadir

    site1 = [3902469, 7774305, 3699991]
    site2 = [2014113, 4275075, 1019436,
             3154996, 3884955, 27034,
             4134561, 27018, 6115230,
             27037, 8409791, 27011]
    site3 = [3007585, 8697774, 9750701,
             10064, 21019, 10042,
             10128, 2497695, 4164316,
             1552181, 4046678, 23012]
    site4 = [1679142, 1206380, 23008,
             4016887, 1418396, 2950754,
             3994098, 3520880, 1517058,
             9744150, 1562298, 3205761, 3624598]
    # Build the mocked phenotypic CSV listing all 40 subjects
    all_subjects = np.array(site1 + site2 + site3 + site4, dtype='i8')
    all_subjects = all_subjects.view(dtype=[('Subject', 'i8')])
    tst.mock_fetch_files.add_csv(
        'ADHD200_40subs_motion_parameters_and_phenotypics.csv',
        all_subjects)

    adhd = func.fetch_adhd(data_dir=tst.tmpdir, url=local_url,
                           n_subjects=12, verbose=0)
    assert_equal(len(adhd.func), 12)
    assert_equal(len(adhd.confounds), 12)
    # One URL per subject plus one for the phenotypic file
    assert_equal(len(tst.mock_url_request.urls), 13)
    assert_not_equal(adhd.description, '')
    def test_limit_documents(self):
        """limit() and offset() must bound the documents iterator length."""
        coll = self.conn.collection
        coll.test.create()

        coll.test.docs.create({"doc": 1})
        coll.test.docs.create({"doc": 2})

        # Two documents exist, so the reported count is not 1
        assert_not_equal(
            self.conn.collection.test.documents().count,
            1
        )

        # Iterating without limits yields exactly `count` documents
        assert_equal(
            len(self.conn.collection.test.documents()),
            self.conn.collection.test.documents().count
        )

        # limit(1) truncates iteration to a single document
        assert_equal(
            len(self.conn.collection.test.documents().limit(1)),
            1
        )

        # Offsetting past the last document yields nothing, even with a limit
        remainder = (self.conn.collection.test
                     .documents()
                     .offset(2)
                     .limit(1))
        assert_equal(len(remainder), 0)
Beispiel #13
0
def test_group(module, EXAMPLE):
    """Exercise the Group protocol: init/add/dump/load/merge/score/sample."""
    assert_hasattr(module, 'Group')
    assert_is_instance(module.Group, type)

    shared = module.Shared.from_dict(EXAMPLE['shared'])
    values = EXAMPLE['values']
    for value in values:
        shared.add_value(value)

    # Incremental add_value must match the bulk from_values constructor
    group1 = module.Group()
    group1.init(shared)
    for value in values:
        group1.add_value(shared, value)
    group2 = module.Group.from_values(shared, values)
    assert_close(group1.dump(), group2.dump())

    # dump -> init -> load must round-trip the group state
    group = module.Group.from_values(shared, values)
    dumped = group.dump()
    group.init(shared)
    group.load(dumped)
    assert_close(group.dump(), dumped)

    # Emptying group2 makes it differ from group1; merge is then exercised
    for value in values:
        group2.remove_value(shared, value)
    assert_not_equal(group1, group2)
    group2.merge(shared, group1)

    # Smoke-test scoring and sampling (no exact values asserted)
    for value in values:
        group1.score_value(shared, value)
    for _ in xrange(10):
        value = group1.sample_value(shared)
        group1.score_value(shared, value)
        module.sample_group(shared, 10)
    group1.score_data(shared)
    group2.score_data(shared)
Beispiel #14
0
def test_gaussian_random_seed():
    """Global seeding makes gaussian() draws reproducible; None un-seeds."""
    # Known first two draws of the seed-42 sequence
    expected_first = 0.4967141530112327
    expected_second = -0.13826430117118466

    gson = \
    """
    {
        "gaussian_random_seed" : gaussian(0, 1, draws=2)
    }
    """

    # Seeded: draws match the known sequence
    genson.set_global_seed(42)
    gen = genson.loads(gson)
    vals = [val['gaussian_random_seed'] for val in gen]
    assert_equal(vals[0], expected_first)
    assert_equal(vals[1], expected_second)

    # Un-seeded: draws diverge from the seeded sequence
    genson.set_global_seed(None)
    gen.reset()
    vals = [val['gaussian_random_seed'] for val in gen]
    assert_not_equal(vals[0], expected_first)
    assert_not_equal(vals[1], expected_second)

    # Re-seeding restores the original sequence
    genson.set_global_seed(42)
    gen.reset()
    gen = genson.loads(gson)
    vals = [val['gaussian_random_seed'] for val in gen]
    assert_equal(vals[0], expected_first)
    assert_equal(vals[1], expected_second)
Beispiel #15
0
    def test_001_is_runnable(self):
        """The server must still be alive shortly after launch.

        As authauth is a persistent server it should be still be running!"""
        time.sleep(1)
        # The process handle was created...
        assert_not_equal(self.process, None)
        # ...and poll() returning None means it has not exited yet
        assert_equal(self.process.poll(), None)
Beispiel #16
0
def test_Opener_various():
    """Opener handles plain, gzip, bz2 and in-memory targets uniformly."""
    message = b"Oh what a giveaway"
    bz2_has_fileno = hasattr(BZ2File, 'fileno')
    with InTemporaryDirectory():
        sobj = BytesIO()
        targets = ('test.txt',
                   'test.txt.gz',
                   'test.txt.bz2',
                   sobj)
        for target in targets:
            # Write the payload through the Opener abstraction
            with Opener(target, 'wb') as fobj:
                fobj.write(message)
                assert_equal(fobj.tell(), len(message))
            if target == sobj:
                target.seek(0)
            # Read it back and check fileno behavior per target type
            with Opener(target, 'rb') as fobj:
                assert_equal(message, fobj.read())
                if target == sobj:
                    # Fileno is unsupported for BytesIO
                    assert_raises(UnsupportedOperation, fobj.fileno)
                elif target.endswith('.bz2') and not bz2_has_fileno:
                    # Older BZ2File objects expose no fileno attribute
                    assert_raises(AttributeError, fobj.fileno)
                else:
                    # Just check there is a fileno
                    assert_not_equal(fobj.fileno(), 0)
def test2():
    """A one-cell (10, 10) nanotube LAMMPS data file holds 40 atoms, not 20."""
    datafile = resource_filename('sknano', 'data/nanotubes/1010_1cell.data')
    structure_atoms = DATAData(fpath=datafile).atoms
    structure_atoms.assign_unique_ids()
    structure_atoms.update_attrs()
    assert_not_equal(structure_atoms.Natoms, 20)
    assert_equal(structure_atoms.Natoms, 40)
Beispiel #18
0
def test_complete():
    """Build a figure exercising many artist types, then pickle/unpickle it."""
    fig = plt.figure('Figure with a label?', figsize=(10, 6))

    plt.suptitle('Can you fit any more in a figure?')

    # make some arbitrary data
    x, y = np.arange(8), np.arange(10)
    data = u = v = np.linspace(0, 10, 80).reshape(10, 8)
    v = np.sin(v * -0.6)

    # One subplot per plot type so pickling covers all artist classes
    plt.subplot(3, 3, 1)
    plt.plot(list(xrange(10)))

    plt.subplot(3, 3, 2)
    plt.contourf(data, hatches=['//', 'ooo'])
    plt.colorbar()

    plt.subplot(3, 3, 3)
    plt.pcolormesh(data)

    plt.subplot(3, 3, 4)
    plt.imshow(data)

    plt.subplot(3, 3, 5)
    plt.pcolor(data)

    plt.subplot(3, 3, 6)
    plt.streamplot(x, y, u, v)

    plt.subplot(3, 3, 7)
    plt.quiver(x, y, u, v)

    plt.subplot(3, 3, 8)
    plt.scatter(x, x**2, label='$x^2$')
    plt.legend(loc='upper left')

    plt.subplot(3, 3, 9)
    plt.errorbar(x, x * -0.5, xerr=0.2, yerr=0.4)

    ###### plotting is done, now test its pickle-ability #########

    # Uncomment to debug any unpicklable objects. This is slow (~200 seconds).
#    recursive_pickle(fig)

    result_fh = BytesIO()
    pickle.dump(fig, result_fh, pickle.HIGHEST_PROTOCOL)

    plt.close('all')

    # make doubly sure that there are no figures left
    assert_equal(plt._pylab_helpers.Gcf.figs, {})

    # wind back the fh and load in the figure
    result_fh.seek(0)
    fig = pickle.load(result_fh)

    # make sure there is now a figure manager
    assert_not_equal(plt._pylab_helpers.Gcf.figs, {})

    # The label must survive the pickle round-trip
    assert_equal(fig.get_label(), 'Figure with a label?')
    def test_delete_cohort_with_tags(self):
        """Tagging a cohort then deleting it removes the cohort record."""
        # add a tag to the cohort
        unparsed_tag = "test-tag"
        response = self.app.post('/cohorts/{0}/tag/add/{1}'
                                 .format(self.cohort.id, unparsed_tag))
        # Response JSON should list the newly added tag
        assert_true(response.data.find('"tags":') >= 0)
        assert_true(response.data.find('"name": "test-tag"') >= 0)
        self.session.commit()

        # Both the tag row and the cohort-tag link row must now exist
        t = self.session.query(TagStore.id) \
            .filter(TagStore.name == 'test-tag') \
            .first()
        assert_not_equal(t, None)
        ct = self.session.query(CohortTagStore) \
            .filter(CohortTagStore.cohort_id == self.cohort.id) \
            .filter(CohortTagStore.tag_id == t[0]) \
            .first()
        assert_not_equal(ct, None)

        # delete the cohort
        cohort_id = self.cohort.id
        response = self.app.post('/cohorts/delete/{0}'.format(self.cohort.id))

        # Deletion responds with a redirect back to the cohorts listing
        assert_true(response.data.find('isRedirect') >= 0)
        assert_true(response.data.find('/cohorts') >= 0)
        self.session.commit()

        # The cohort row is gone from the store
        c = self.session.query(CohortStore).get(cohort_id)
        assert_equal(c, None)
def step_impl(context, data):
    """Behave step: assert page data is 'new' (changed) or 'previous' (same)."""
    if data == 'new':
        # Current page data must differ from the snapshot taken earlier
        assert_not_equal(context.page_data,
                         context.page.get_all_page_data())
    elif data == 'previous':
        # Current page data must match the older snapshot exactly
        assert_equal(context.old_page_data,
                     context.page.get_all_page_data())
    else:
        raise Exception('Unknown data')
Beispiel #21
0
def test_yahoo_pass():
    """Localparts within the documented Yahoo rules must all validate."""
    with patch.object(address, 'mail_exchanger_lookup') as mock_method:
        mock_method.side_effect = mock_exchanger_lookup

        def expect_valid(localpart):
            # Every generated localpart is expected to validate
            addr = address.validate_address(localpart + DOMAIN)
            assert_not_equal(addr, None)

        # valid length range
        for length in range(4, 33):
            expect_valid(''.join(random.choice(string.ascii_letters)
                                 for x in range(length)))

        # start must be letter
        for ch in string.ascii_letters:
            expect_valid(str(ch) + 'aaa')

        # end must be letter or number
        for ch in string.ascii_letters + string.digits:
            expect_valid('aaa' + str(ch))

        # must be letter, num, and underscore
        for ch in string.ascii_letters + string.digits + '_':
            expect_valid('aa' + str(ch) + '00')

        # only zero or one dot (.) allowed
        for dots in range(0, 2):
            expect_valid('aa' + '.'*dots + '00')
 def test_configure_get(self):
     """GET /metrics/configure/BytesAdded must render the metric's form."""
     response = self.app.get('/metrics/configure/BytesAdded')
     # str.find returns -1 when absent, so != -1 means the field rendered
     assert_not_equal(
         response.data.find('name="positive_only_sum"'),
         -1,
         'A form to configure a BytesAdded metric was not rendered'
     )
def test_info():
    "Check that Inspector.info fills out various fields as expected."
    i = inspector.info(Call, oname='Call')
    nt.assert_equal(i['type_name'], 'type')
    expted_class = str(type(type))  # <class 'type'> (Python 3) or <type 'type'>
    nt.assert_equal(i['base_class'], expted_class)
    nt.assert_regex(i['string_form'], "<class 'IPython.core.tests.test_oinspect.Call'( at 0x[0-9a-f]{1,9})?>")
    fname = __file__
    if fname.endswith(".pyc"):
        fname = fname[:-1]
    # case-insensitive comparison needed on some filesystems
    # e.g. Windows:
    nt.assert_equal(i['file'].lower(), compress_user(fname).lower())
    nt.assert_equal(i['definition'], None)
    nt.assert_equal(i['docstring'], Call.__doc__)
    nt.assert_equal(i['source'], None)
    nt.assert_true(i['isclass'])
    nt.assert_equal(i['init_definition'], "Call(x, y=1)")
    nt.assert_equal(i['init_docstring'], Call.__init__.__doc__)

    # At detail_level=1 the source is included and docstring is suppressed
    i = inspector.info(Call, detail_level=1)
    nt.assert_not_equal(i['source'], None)
    nt.assert_equal(i['docstring'], None)

    # An instance docstring overrides the class docstring in the report
    c = Call(1)
    c.__doc__ = "Modified instance docstring"
    i = inspector.info(c)
    nt.assert_equal(i['type_name'], 'Call')
    nt.assert_equal(i['docstring'], "Modified instance docstring")
    nt.assert_equal(i['class_docstring'], Call.__doc__)
    nt.assert_equal(i['init_docstring'], Call.__init__.__doc__)
    nt.assert_equal(i['call_docstring'], Call.__call__.__doc__)
Beispiel #24
0
def test_outgoing_url():
    """get_outgoing_url escapes URLs but passes whitelisted domains through."""
    # Save settings so they can be restored even if an assertion fails
    saved_url = settings.REDIRECT_URL
    saved_key = settings.REDIRECT_SECRET_KEY
    saved_whitelist = settings.REDIRECT_URL_WHITELIST
    settings.REDIRECT_URL = 'http://example.net'
    settings.REDIRECT_SECRET_KEY = 'sekrit'
    settings.REDIRECT_URL_WHITELIST = ['nicedomain.com']

    try:
        myurl = 'http://example.com'
        s = urlresolvers.get_outgoing_url(myurl)

        # Regular URLs must be escaped.
        eq_(s,
            'http://example.net/bc7d4bb262c9f0b0f6d3412ede7d3252c2e311bb1d55f6'
            '2315f636cb8a70913b/'
            'http%3A//example.com')

        # No double-escaping of outgoing URLs.
        eq_(s, urlresolvers.get_outgoing_url(s))

        # A subdomain lookalike of REDIRECT_URL must still be escaped
        evil = settings.REDIRECT_URL.rstrip('/') + '.evildomain.com'
        assert_not_equal(urlresolvers.get_outgoing_url(evil), evil,
                         'No subdomain abuse of double-escaping protection.')

        # Whitelisted domains pass through untouched
        nice = 'http://nicedomain.com/lets/go/go/go'
        eq_(nice, urlresolvers.get_outgoing_url(nice))

    finally:
        settings.REDIRECT_URL = saved_url
        settings.REDIRECT_SECRET_KEY = saved_key
        settings.REDIRECT_URL_WHITELIST = saved_whitelist
Beispiel #25
0
    def test_allocate(self):
        """Check allocate() argument validation and range bookkeeping."""
        self.block.from_list([0] * 100)
        # Invalid argument combinations are rejected up front
        assert_raises(InvalidArgumentError, self.block.allocate)
        assert_raises(InvalidArgumentError, self.block.allocate, None, 0)
        assert_raises(InvalidArgumentError, self.block.allocate, None, -1)
        assert_raises(InvalidArgumentError, self.block.allocate, None, -10)
        assert_raises(InvalidArgumentError, self.block.allocate, [], None)
        assert_raises(InvalidArgumentError, self.block.allocate, [1], 2)

        # Allocate an entire range
        self.block.deallocate((0, 49))
        assert_raises(NotEnoughUnallocatedSpaceError, self.block.allocate, None, 51)
        offset = self.block.allocate(size=50)
        assert_equal(offset, 0)
        assert_equal(self.block.unallocated_ranges, [])

        # Allocate the beginning of a range
        self.block.deallocate((10, 39))
        offset = self.block.allocate(data=[0x12, 0x34, 0xef])
        assert_equal(offset, 10)
        # Three bytes consumed from the front of (10, 39) leaves (13, 39)
        assert_equal(self.block.unallocated_ranges, [(13, 39)])
        assert_equal(self.block[offset:offset + 3].to_list(), [0x12, 0x34, 0xef])
        assert_not_equal(self.block.to_list(), [0] * 100)
        # Zeroing the written bytes restores the original contents
        self.block[offset:offset + 3] = [0] * 3
        assert_equal(self.block.to_list(), [0] * 100)
def test_farsi_correction():
    """Verify that a Farsi sentence is corrected"""
    wrong = u"ابن یک جملهٔ آرمایسی است"
    right = u"این یک جملهٔ آزمایشی است"
    # Rebuild the sentence from the suggested replacement words
    suggestions = d.suggest(wrong)
    corrected_text = u" ".join([word.new for word in suggestions])
    # The corrected sentence must differ from the misspelled input
    assert_not_equal(wrong, corrected_text)
 def test_write_uncor(self):
     """ Testcase main

     After a write-uncorrectable command, reads must fail until fresh
     data is written again; a return value of 0 means success.
     """
     # Baseline: the range reads back cleanly
     assert_equal(self.nvme_read(), 0)
     # Mark the range uncorrectable
     assert_equal(self.write_uncor(), 0)
     # Reads of the uncorrectable range must now error
     assert_not_equal(self.nvme_read(), 0)
     # Writing valid data clears the condition
     assert_equal(self.nvme_write(), 0)
     assert_equal(self.nvme_read(), 0)
    def test_rev(self):
        """Clearing _rev leaves the revision unset while _id survives."""
        doc = self.create_document({})

        doc._rev = None

        # The document keeps its identifier...
        assert_not_equal(doc._id, None)
        # ...but the revision stays cleared
        assert_equal(doc._rev, None)
Beispiel #29
0
 def test_select(self):
     """select() returns a non-empty, fully-named employee set on each DB."""
     for db in settings.DATABASES:
         people = Employee.objects.db_manager(db).select()
         # At least one employee row per configured database
         assert_not_equal(people.count(), 0)
         for p in people:
             # Every record must carry both name fields
             ok_(p.first_name)
             ok_(p.last_name)
Beispiel #30
0
def test_simple_stochastic_synapse(sim, plot_figure=False):
    """Connect one spike source to four neurons through stochastic synapses
    with per-connection transmission probabilities p = 0, 0.5, 0.5, 1 and
    check the observed synaptic-event rates match those probabilities."""
    sim.setup(min_delay=0.5)
    t_stop = 1000.0
    spike_times = np.arange(2.5, t_stop, 5.0)
    source = sim.Population(1, sim.SpikeSourceArray(spike_times=spike_times))
    neurons = sim.Population(4, sim.IF_cond_exp(tau_syn_E=1.0))
    synapse_type = sim.SimpleStochasticSynapse(weight=0.5,
                                               p=np.array([[0.0, 0.5, 0.5, 1.0]]))
    connections = sim.Projection(source, neurons, sim.AllToAllConnector(),
                                 synapse_type=synapse_type)
    source.record('spikes')
    neurons.record('gsyn_exc')
    sim.run(t_stop)

    data = neurons.get_data().segments[0]
    gsyn = data.analogsignals[0].rescale('uS')
    if plot_figure:
        import matplotlib.pyplot as plt
        for i in range(neurons.size):
            plt.subplot(neurons.size, 1, i+1)
            plt.plot(gsyn.times, gsyn[:, i])
        plt.savefig("test_simple_stochastic_synapse_%s.png" % sim.__name__)
    print(data.analogsignals[0].units)
    # Count upward crossings of 0.4 uS per neuron as a proxy for the
    # number of transmitted synaptic events
    crossings = []
    for i in range(neurons.size):
        crossings.append(
                gsyn.times[:-1][np.logical_and(gsyn.magnitude[:-1, i] < 0.4, 0.4 < gsyn.magnitude[1:, i])])
    # p=0: no events; p=0.5: roughly half the spikes; p=1: every spike
    assert_equal(crossings[0].size, 0)
    assert_less(crossings[1].size, 0.6*spike_times.size)
    assert_greater(crossings[1].size, 0.4*spike_times.size)
    assert_equal(crossings[3].size, spike_times.size)
    # The two p=0.5 synapses should draw independent event sequences
    assert_not_equal(crossings[1], crossings[2])
    print(crossings[1].size / spike_times.size)
    return data
Beispiel #31
0
 def test_locks_for_meta_tiles(self):
     """Tiles in different meta-tiles must lock on distinct lock files."""
     # (0, 0, 2) and (2, 0, 2) belong to different meta tiles, so their
     # locks must not collide on the same file
     assert_not_equal(
         self.tile_mgr.lock(Tile((0, 0, 2))).lock_file,
         self.tile_mgr.lock(Tile((2, 0, 2))).lock_file)
Beispiel #32
0
def test_simple():
    """dirstack.cd into "tests" must land in the HERE directory."""
    load_builtins()
    with chdir(PARENT):
        # Starting in the parent we are not yet in HERE...
        assert_not_equal(os.getcwd(), HERE)
        # ...until cd-ing into the tests subdirectory
        dirstack.cd(["tests"])
        assert_equal(os.getcwd(), HERE)
Beispiel #33
0
def test_version():
    """`anorack --version` writes to stdout and leaves stderr empty."""
    command_line = ['anorack', '--version']
    (out, err) = run_main(command_line, None)
    assert_not_equal('', out)
    assert_equal('', err)
Beispiel #34
0
def test_copy_files():
    """Check Submission._copy_files copies workflow jars into the deployment
    lib dir, leaves sources intact, and re-copies on repeat (except jars
    already under the workspace lib dir, which are never moved)."""
    cluster = pseudo_hdfs4.shared_cluster()

    try:
        c = make_logged_in_client()
        user = User.objects.get(username='******')

        prefix = '/tmp/test_copy_files'

        if cluster.fs.exists(prefix):
            cluster.fs.rmtree(prefix)

        # Jars in various locations
        deployment_dir = '%s/workspace' % prefix
        external_deployment_dir = '%s/deployment' % prefix
        jar_1 = '%s/udf1.jar' % prefix
        jar_2 = '%s/lib/udf2.jar' % prefix
        jar_3 = '%s/udf3.jar' % deployment_dir
        jar_4 = '%s/lib/udf4.jar' % deployment_dir  # Never move

        cluster.fs.mkdir(prefix)
        cluster.fs.create(jar_1)
        cluster.fs.create(jar_2)
        cluster.fs.create(jar_3)
        cluster.fs.create(jar_4)

        # Minimal stand-ins for workflow nodes/jobs carrying the jar paths
        class MockNode():
            def __init__(self, jar_path):
                self.jar_path = jar_path

        class MockJob():
            def __init__(self):
                self.node_list = [
                    MockNode(jar_1),
                    MockNode(jar_2),
                    MockNode(jar_3),
                    MockNode(jar_4),
                ]

            def get_application_filename(self):
                return 'workflow.xml'

        submission = Submission(user,
                                job=MockJob(),
                                fs=cluster.fs,
                                jt=cluster.jt)

        submission._copy_files(deployment_dir, "<xml>My XML</xml>")
        submission._copy_files(external_deployment_dir, "<xml>My XML</xml>")

        # All sources still there
        assert_true(cluster.fs.exists(jar_1))
        assert_true(cluster.fs.exists(jar_2))
        assert_true(cluster.fs.exists(jar_3))
        assert_true(cluster.fs.exists(jar_4))

        # Copies land in the lib/ subdirectory of each deployment dir
        deployment_dir = deployment_dir + '/lib'
        external_deployment_dir = external_deployment_dir + '/lib'

        list_dir_workspace = cluster.fs.listdir(deployment_dir)
        list_dir_deployement = cluster.fs.listdir(external_deployment_dir)

        # All destinations there
        assert_true(cluster.fs.exists(deployment_dir + '/udf1.jar'),
                    list_dir_workspace)
        assert_true(cluster.fs.exists(deployment_dir + '/udf2.jar'),
                    list_dir_workspace)
        assert_true(cluster.fs.exists(deployment_dir + '/udf3.jar'),
                    list_dir_workspace)
        assert_true(cluster.fs.exists(deployment_dir + '/udf4.jar'),
                    list_dir_workspace)

        assert_true(cluster.fs.exists(external_deployment_dir + '/udf1.jar'),
                    list_dir_deployement)
        assert_true(cluster.fs.exists(external_deployment_dir + '/udf2.jar'),
                    list_dir_deployement)
        assert_true(cluster.fs.exists(external_deployment_dir + '/udf3.jar'),
                    list_dir_deployement)
        assert_true(cluster.fs.exists(external_deployment_dir + '/udf4.jar'),
                    list_dir_deployement)

        # Record file ids so a re-copy can be detected by id change
        stats_udf1 = cluster.fs.stats(deployment_dir + '/udf1.jar')
        stats_udf2 = cluster.fs.stats(deployment_dir + '/udf2.jar')
        stats_udf3 = cluster.fs.stats(deployment_dir + '/udf3.jar')
        stats_udf4 = cluster.fs.stats(deployment_dir + '/udf4.jar')

        submission._copy_files('%s/workspace' % prefix, "<xml>My XML</xml>")

        # udf1-3 were re-copied (new fileId); udf4 was already in lib/ and
        # must be left untouched (same fileId)
        assert_not_equal(
            stats_udf1['fileId'],
            cluster.fs.stats(deployment_dir + '/udf1.jar')['fileId'])
        assert_not_equal(
            stats_udf2['fileId'],
            cluster.fs.stats(deployment_dir + '/udf2.jar')['fileId'])
        assert_not_equal(
            stats_udf3['fileId'],
            cluster.fs.stats(deployment_dir + '/udf3.jar')['fileId'])
        assert_equal(stats_udf4['fileId'],
                     cluster.fs.stats(deployment_dir + '/udf4.jar')['fileId'])

    finally:
        # Best-effort cleanup; log rather than mask the real failure
        try:
            cluster.fs.rmtree(prefix)
        except:
            LOG.exception('failed to remove %s' % prefix)
Beispiel #35
0
def test_equality():
    """
    Test featureset equality
    """
    # keyword arguments shared by every generated featureset; variants
    # below override exactly one aspect at a time
    base = dict(num_examples=100,
                num_features=4,
                num_labels=3,
                train_test_ratio=1.0)

    # reference featureset
    fs1, _ = make_classification_data(**base)

    # same shape but different feature values
    fs2, _ = make_classification_data(**base)
    fs2.features *= 2

    # different feature names, everything else the same
    fs3, _ = make_classification_data(feature_prefix='g', **base)

    # different set of labels, everything else the same
    fs4, _ = make_classification_data(**dict(base, num_labels=2))

    # different set (but same number) of IDs, everything else the same
    fs5, _ = make_classification_data(**base)
    fs5.ids = np.array(['A' + i for i in fs2.ids])

    # different vectorizer (feature hashing), everything else the same
    fs6, _ = make_classification_data(use_feature_hashing=True,
                                      feature_bins=2,
                                      **base)

    # different number of features, everything else the same
    fs7, _ = make_classification_data(**dict(base, num_features=5))

    # different number of examples, everything else the same
    fs8, _ = make_classification_data(**dict(base, num_examples=200))

    # identical data but a distinct vectorizer *instance*
    fs9, _ = make_classification_data(**base)

    # now check for the expected equalities
    assert_not_equal(fs1, fs2)
    assert_not_equal(fs1, fs3)
    assert_not_equal(fs1, fs4)
    assert_not_equal(fs1, fs5)
    assert_not_equal(fs1, fs6)
    assert_not_equal(fs1, fs7)
    assert_not_equal(fs1, fs8)
    # equality must hold even though the vectorizer objects differ
    assert_not_equal(id(fs1.vectorizer), id(fs9.vectorizer))
    eq_(fs1, fs9)
Beispiel #36
0
    def test_addOrdersToFleetsForTurn(self):
        """Orders sent via addOrdersToFleetsForTurn become each fleet's
        destination while the fleet itself stays at its current (x, y)."""
        NORTH_OFFSET = (0, 50, 0)
        EAST_OFFSET = (50, 0, 0)
        SOUTH_OFFSET = (0, -50, 0)
        WEST_OFFSET = (-50, 0, 0)

        fc_0 = self.player0.fleetCommand
        hw_0_xy = self.p0_colonyHW_obj.planet.xy

        # ---------- setup and test -----------
        startingShipCount = startingShipDesignsCount()
        assert_equal(len(fc_0.fleets), startingShipCount)

        for key, obj in fc_0.fleets.items():
            # every fleet starts at the homeworld with an empty order list
            assert_true(isinstance(obj.fleetOrders, list))
            assert_equal(len(obj.fleetOrders), 0)
            assert_equal(obj.xy, hw_0_xy)

        # ----------- give orders and test -------

        print("hw_0_xy: %s; n_order: %s; e_order:%s" %
              (hw_0_xy, TestFleets._update_xy_orders(hw_0_xy, NORTH_OFFSET),
               TestFleets._update_xy_orders(hw_0_xy, EAST_OFFSET)))

        # ------ orders for fleet: one order per fleet, aiming 50 units
        # north/east/south/west of the homeworld -----
        testCommands = {
            index: {
                "orders": [{
                    "coordinates":
                    TestFleets._update_xy_orders(hw_0_xy, offset),
                    "velocity_command": "speed_levels_from_list",
                    "waypoint_action": "action_from_list",
                }]
            }
            for index, offset in enumerate(
                (NORTH_OFFSET, EAST_OFFSET, SOUTH_OFFSET, WEST_OFFSET))
        }

        # ---- send orders to fleet -----
        fc_0.addOrdersToFleetsForTurn(testCommands)

        # ----- test fleet location -----
        for key, obj in fc_0.fleets.items():
            assert_true(isinstance(obj.fleetOrders, list))
            # exactly the one order issued above
            assert_equal(len(obj.fleetOrders), 1)
            print("xy{} destinationxy{}".format(
                obj.xy, obj.fleetOrders[0]["coordinates"]))
            # fleet has not moved yet...
            assert_equal(obj.xy, hw_0_xy)
            # ...but its destination now matches the issued order
            assert_not_equal(obj.xy, obj.fleetOrders[0]["coordinates"])
            assert_equal(obj.destinationXY, obj.fleetOrders[0]["coordinates"])
    def check_desktop_links(self, links):
        """Desktop links should have the correct firefox version"""
        # every download href must mention the latest firefox version
        expected = 'firefox-%s' % self.latest_version()
        for link in links:
            href = pq(link).attr('href')
            # str.find returns -1 only when the marker is absent
            assert_not_equal(href.find(expected), -1)
Beispiel #38
0
def test_repaired_neuron():
    # A repaired morphology must not be reported as a clean cut ('ok' status).
    plane = CutPlane.find(DATA / 'bio_neuron-000.h5', bin_width=10)
    assert_not_equal(plane.to_json()['status'], 'ok')
Beispiel #39
0
def test_versioned_graph_save_and_restore(workdir):
    """Round-trip a token-dict of graph versions through a JSON file.

    Two graph states share one 'static' subgraph; four version labels map
    onto those two states.  After writing and re-reading the file we check
    that identical states deduplicate to identical tokens, different
    states get different tokens, and the full attribute graphs restore.
    """
    path = os.path.join(workdir, 'my-graph.json')
    # Subgraph used verbatim by both versions, so its token should be shared.
    static = dag({"unchanging": "eternal"}, {})

    # State 0: has 'static', 'changing' (a=A) and a member that later goes away.
    graph_s0 = dag({
        "name": "graph",
        "version": 0
    }, {
        "static": static,
        "changing": dag({"a": "A"}, {}),
        "removed": dag({"bleep": "blorp"}, {}),
    })

    # State 1: same 'static', mutated 'changing' (b=B), 'removed' dropped.
    graph_s1 = dag({
        "name": "graph",
        "version": 1
    }, {
        "static": static,
        "changing": dag({"b": "B"}, {}),
    })

    # Four labels, but only two distinct underlying states.
    version_map = tokendict.TokenDict({
        "v0": graph_s0,
        "v1": graph_s0,
        "v2": graph_s1,
        "v3": graph_s1
    })

    # Serialize the annotated structure to disk.
    transformer = merky.AnnotationTransformer()
    writer = structure.JSONFileWriteStructure(path)
    writer.populate(transformer.transform(version_map))
    writer.close()

    # Read it back as raw tokens first.
    reader = structure.JSONFileReadStructure(path)
    tokens = tokendict.TokenDict.from_token(reader.head, reader.get)

    tools.assert_equal(["v0", "v1", "v2", "v3"], list(tokens.keys()))

    # Same states get same tokens.
    tools.assert_equal(tokens.dict_["v0"], tokens.dict_["v1"])
    tools.assert_equal(tokens.dict_["v2"], tokens.dict_["v3"])
    tools.assert_equal(getpath(reader, "v0", 1, "static"),
                       getpath(reader, "v2", 1, "static"))

    # Different states, different tokens.
    tools.assert_not_equal(tokens.dict_["v1"], tokens.dict_["v2"])
    tools.assert_not_equal(getpath(reader, "v0", 1, "changing"),
                           getpath(reader, "v2", 1, "changing"))

    # Now restore full AttributeGraph objects instead of raw tokens.
    restored = tokendict.TokenDict.from_token(
        reader.head, reader.get, attrgraph.AttributeGraph.from_token)

    tools.assert_equal(["v0", "v1", "v2", "v3"], list(restored.keys()))

    # Attributes and member sets round-trip per state.
    tools.assert_equal(dict(graph_s0.attrs), dict(restored["v0"].attrs))
    tools.assert_equal(dict(graph_s1.attrs), dict(restored["v2"].attrs))
    tools.assert_equal(["changing", "removed", "static"],
                       list(sorted(restored["v0"].members.keys())))
    tools.assert_equal(["changing", "static"],
                       list(sorted(restored["v2"].members.keys())))
    tools.assert_equal(dict(graph_s0.members["changing"].attrs),
                       dict(restored["v1"].members["changing"].attrs))
    tools.assert_equal(dict(graph_s1.members["changing"].attrs),
                       dict(restored["v3"].members["changing"].attrs))
    # The shared 'static' member is identical across restored versions.
    tools.assert_equal(dict(restored["v0"].members["static"].attrs),
                       dict(restored["v3"].members["static"].attrs))
def test_independent():
    """Exercise IndependentComponentsDistribution: log-probability,
    weighting, fitting (with inertia), summarization, freezing,
    serialization, and per-column from_samples construction."""
    # unweighted product of a Normal and an Exponential component
    dist = IndependentComponentsDistribution(
        [NormalDistribution(5, 2), ExponentialDistribution(2)])

    assert_equal(round(dist.log_probability((4, 1)), 4), -3.0439)
    assert_equal(round(dist.log_probability((100, 0.001)), 4), -1129.0459)

    # component weights scale each component's log-probability contribution
    dist = IndependentComponentsDistribution(
        [NormalDistribution(5, 2), ExponentialDistribution(2)],
        weights=[18., 1.])

    assert_equal(round(dist.log_probability((4, 1)), 4), -32.5744)
    assert_equal(round(dist.log_probability((100, 0.001)), 4), -20334.5764)

    # fitting updates each component from its own column of the data
    dist.fit([(5, 1), (5.2, 1.7), (4.7, 1.9), (4.9, 2.4), (4.5, 1.2)])

    assert_equal(round(dist.parameters[0][0].parameters[0], 4), 4.86)
    assert_equal(round(dist.parameters[0][0].parameters[1], 4), 0.2417)
    assert_equal(round(dist.parameters[0][1].parameters[0], 4), 0.6098)

    # fitting with inertia blends previous and newly estimated parameters
    dist = IndependentComponentsDistribution([NormalDistribution(5, 2),
                                              UniformDistribution(0, 10)])
    dist.fit([(0, 0), (5, 0), (3, 0), (5, -5), (7, 0),
              (3, 0), (4, 0), (5, 0), (2, 20)], inertia=0.5)

    assert_equal(round(dist.parameters[0][0].parameters[0], 4), 4.3889)
    assert_equal(round(dist.parameters[0][0].parameters[1], 4), 1.9655)

    assert_equal(dist.parameters[0][1].parameters[0], -2.5)
    assert_equal(dist.parameters[0][1].parameters[1], 15)

    # a different inertia on the same data yields different blends
    dist.fit([(0, 0), (5, 0), (3, 0), (5, -5), (7, 0),
              (3, 0), (4, 0), (5, 0), (2, 20)], inertia=0.75)

    assert_not_equal(round(dist.parameters[0][0].parameters[0], 4), 4.3889)
    assert_not_equal(round(dist.parameters[0][0].parameters[1], 4), 1.9655)

    assert_not_equal(dist.parameters[0][1].parameters[0], -2.5)
    assert_not_equal(dist.parameters[0][1].parameters[1], 15)

    # out-of-core path: summarize chunks, then apply with inertia;
    # this must match the single-batch fit above
    dist = IndependentComponentsDistribution([NormalDistribution(5, 2),
                                              UniformDistribution(0, 10)])

    dist.summarize([(0, 0), (5, 0), (3, 0)])
    dist.summarize([(5, -5), (7, 0)])
    dist.summarize([(3, 0), (4, 0), (5, 0), (2, 20)])
    dist.from_summaries(inertia=0.5)

    assert_equal(round(dist.parameters[0][0].parameters[0], 4), 4.3889)
    assert_equal(round(dist.parameters[0][0].parameters[1], 4), 1.9655)

    assert_equal(dist.parameters[0][1].parameters[0], -2.5)
    assert_equal(dist.parameters[0][1].parameters[1], 15)

    # a frozen distribution ignores further fitting
    dist.freeze()
    dist.fit([(1, 7), (7, 2), (2, 4), (2, 4), (1, 4)])

    assert_equal(round(dist.parameters[0][0].parameters[0], 4), 4.3889)
    assert_equal(round(dist.parameters[0][0].parameters[1], 4), 1.9655)

    assert_equal(dist.parameters[0][1].parameters[0], -2.5)
    assert_equal(dist.parameters[0][1].parameters[1], 15)

    # JSON serialization round-trips all parameters
    decoded = Distribution.from_json(dist.to_json())
    assert_equal(decoded.name, "IndependentComponentsDistribution")

    assert_equal(round(decoded.parameters[0][0].parameters[0], 4), 4.3889)
    assert_equal(round(decoded.parameters[0][0].parameters[1], 4), 1.9655)

    assert_equal(decoded.parameters[0][1].parameters[0], -2.5)
    assert_equal(decoded.parameters[0][1].parameters[1], 15)

    # pickling round-trips as well
    unpickled = pickle.loads(pickle.dumps(decoded))
    assert_equal(decoded.name, "IndependentComponentsDistribution")

    assert_equal(round(unpickled.parameters[0][0].parameters[0], 4), 4.3889)
    assert_equal(round(unpickled.parameters[0][0].parameters[1], 4), 1.9655)

    assert_equal(unpickled.parameters[0][1].parameters[0], -2.5)
    assert_equal(unpickled.parameters[0][1].parameters[1], 15)

    samples = numpy.array([[0.5, 0.2, 0.7],
                           [0.3, 0.1, 0.9],
                           [0.4, 0.3, 0.8],
                           [0.3, 0.3, 0.9],
                           [0.3, 0.2, 0.6],
                           [0.5, 0.2, 0.8]])

    # from_samples with a single class fits that distribution per column
    dist = IndependentComponentsDistribution.from_samples(
        samples, distributions=NormalDistribution)
    assert_almost_equal(dist.parameters[0][0].parameters[0], 0.38333, 4)
    assert_almost_equal(dist.parameters[0][0].parameters[1], 0.08975, 4)
    assert_almost_equal(dist.parameters[0][1].parameters[0], 0.21666, 4)
    assert_almost_equal(dist.parameters[0][1].parameters[1], 0.06872, 4)
    assert_almost_equal(dist.parameters[0][2].parameters[0], 0.78333, 4)
    assert_almost_equal(dist.parameters[0][2].parameters[1], 0.10672, 4)

    dist = IndependentComponentsDistribution.from_samples(
        samples, distributions=ExponentialDistribution)
    assert_almost_equal(dist.parameters[0][0].parameters[0], 2.6087, 4)
    assert_almost_equal(dist.parameters[0][1].parameters[0], 4.6154, 4)
    assert_almost_equal(dist.parameters[0][2].parameters[0], 1.2766, 4)

    # a list of classes assigns one distribution type to each column
    dist = IndependentComponentsDistribution.from_samples(
        samples,
        distributions=[NormalDistribution, NormalDistribution,
                       NormalDistribution])
    assert_almost_equal(dist.parameters[0][0].parameters[0], 0.38333, 4)
    assert_almost_equal(dist.parameters[0][0].parameters[1], 0.08975, 4)
    assert_almost_equal(dist.parameters[0][1].parameters[0], 0.21666, 4)
    assert_almost_equal(dist.parameters[0][1].parameters[1], 0.06872, 4)
    assert_almost_equal(dist.parameters[0][2].parameters[0], 0.78333, 4)
    assert_almost_equal(dist.parameters[0][2].parameters[1], 0.10672, 4)

    dist = IndependentComponentsDistribution.from_samples(
        samples,
        distributions=[NormalDistribution, LogNormalDistribution,
                       ExponentialDistribution])
    assert_almost_equal(dist.parameters[0][0].parameters[0], 0.38333, 4)
    assert_almost_equal(dist.parameters[0][0].parameters[1], 0.08975, 4)
    assert_almost_equal(dist.parameters[0][1].parameters[0], -1.5898, 4)
    assert_almost_equal(dist.parameters[0][1].parameters[1], 0.36673, 4)
    assert_almost_equal(dist.parameters[0][2].parameters[0], 1.27660, 4)
Beispiel #41
0
    def test_all_swagger_preserved_in_spec(self):
        """Builder should store the swagger documented cell.

        A fully documented swagger spec embedded in a notebook cell must
        survive parsing and building unchanged: description, info/title,
        and the complete operations object.  A later set_default_title
        must NOT override the title already present in the spec.
        """
        # Complete swagger 2.0 document used both as notebook cell source
        # and as the expected result.  Runtime string -- do not reformat.
        expected = '''
        {
            "swagger": "2.0",
            "info" : {"version" : "0.0.0", "title" : "Default Title"},
            "paths": {
                "/some/resource": {
                    "get": {
                        "summary": "Get some resource",
                        "description": "Get some kind of resource?",
                        "operationId": "getSomeResource",
                        "produces": [
                            "application/json"
                        ],
                        "responses": {
                            "200": {
                                "description": "a resource",
                                "schema": {
                                    "type": "object",
                                    "required": ["name"],
                                    "properties": {
                                        "name": {
                                            "type": "string"
                                        }
                                    }
                                }
                            },
                            "400": {
                                "description": "Error retrieving resources",
                                "schema": {
                                    "$ref": "#/definitions/error"
                                }
                            }
                        }
                    }
                }
            }
        }
        '''
        # Parser sees the cell during construction; add_cell registers it
        # with the builder for spec assembly.
        builder = SwaggerSpecBuilder(
            SwaggerCellParser(kernelspec='some_spec',
                              notebook_cells=[{
                                  "source": expected
                              }]))
        builder.add_cell(expected)
        result = builder.build()
        self.maxDiff = None
        self.assertEqual(
            result['paths']['/some/resource']['get']['description'],
            json.loads(expected)['paths']['/some/resource']['get']
            ['description'], 'description was not preserved')
        self.assertTrue('info' in result, 'info was not preserved')
        self.assertTrue('title' in result['info'], 'title was not present')
        self.assertEqual(result['info']['title'],
                         json.loads(expected)['info']['title'],
                         'title was not preserved')
        # Compare the whole operations object via canonical JSON dumps.
        self.assertEqual(
            json.dumps(result['paths']['/some/resource'], sort_keys=True),
            json.dumps(json.loads(expected)['paths']['/some/resource'],
                       sort_keys=True), 'operations were not as expected')

        # A *default* title must not clobber the explicit title above.
        new_title = 'new title. same contents.'
        builder.set_default_title(new_title)
        result = builder.build()
        assert_not_equal(result['info']['title'], new_title,
                         'title should not have been changed')
Beispiel #42
0
 def assert_not_equal(self, *args, **kwargs):
     # Method-style wrapper delegating to the module-level assertion
     # of the same name (the bare call resolves to the global).
     assert_not_equal(*args, **kwargs)
 def deepcopy_edge_attr(self, H, G):
     """A deep copy must not share mutable edge-attribute objects."""
     source_attr = G[1][2]['foo']
     copied_attr = H[1][2]['foo']
     assert_equal(source_attr, copied_attr)
     # mutating the source attribute must leave the deep copy untouched
     source_attr.append(1)
     assert_not_equal(source_attr, copied_attr)
Beispiel #44
0
def test_find_file_magic():
    # A registered line magic should have a discoverable source file.
    run_magic = ip.find_line_magic('run')
    nt.assert_not_equal(oinspect.find_file(run_magic), None)
 def deepcopy_graph_attr(self, H, G):
     """A deep copy must not share mutable graph-attribute objects."""
     source_attr = G.graph['foo']
     copied_attr = H.graph['foo']
     assert_equal(source_attr, copied_attr)
     # mutating the source attribute must leave the deep copy untouched
     source_attr.append(1)
     assert_not_equal(source_attr, copied_attr)
Beispiel #46
0
def check_upload(_step, file_name):
    # get_index returns -1 when the file is absent, so any other value
    # means the upload landed in the index.
    assert_not_equal(get_index(file_name), -1)
Beispiel #47
0
def test_osx():
    # Platform guard: fails when run on macOS, where this test is invalid.
    nt.assert_not_equal(
        sys.platform, 'darwin', "This test can't run under osx")
 def deepcopy_node_attr(self, H, G):
     """A deep copy must not share mutable node-attribute objects."""
     source_attr = G.nodes[0]['foo']
     copied_attr = H.nodes[0]['foo']
     assert_equal(source_attr, copied_attr)
     # mutating the source attribute must leave the deep copy untouched
     source_attr.append(1)
     assert_not_equal(source_attr, copied_attr)
Beispiel #49
0
def test_decimal_rescale():
    # Test that we don't get back a data array with object dtype when our
    # rescale slope is a decimal.
    dw = didw.wrapper_from_file(DATA_FILE_DEC_RSCL)
    # ``np.object`` was deprecated in NumPy 1.20 and removed in 1.24, so the
    # old spelling raises AttributeError on modern NumPy.  The builtin
    # ``object`` compares equivalently against a numpy dtype.
    assert_not_equal(dw.get_data().dtype, object)
Beispiel #50
0
    def test_with_multiedges_self_loops(self):
        """Conversions to/from dict-of-dicts and dict-of-lists must cope
        with self loops and multiedges, with documented lossy cases."""
        G = cycle_graph(10)
        # plain weighted graph
        XG = nx.Graph()
        XG.add_nodes_from(G)
        XG.add_weighted_edges_from((u, v, u) for u, v in G.edges())
        # multigraph with a duplicated (0, 1) edge
        XGM = nx.MultiGraph()
        XGM.add_nodes_from(G)
        XGM.add_weighted_edges_from((u, v, u) for u, v in G.edges())
        XGM.add_edge(0, 1, weight=2)  # multiedge
        # graph with a self loop on node 0
        XGS = nx.Graph()
        XGS.add_nodes_from(G)
        XGS.add_weighted_edges_from((u, v, u) for u, v in G.edges())
        XGS.add_edge(0, 0, weight=100)  # self loop

        # Dict of dicts
        # with self loops, OK
        dod = to_dict_of_dicts(XGS)
        GG = from_dict_of_dicts(dod, create_using=nx.Graph)
        assert_nodes_equal(XGS.nodes(), GG.nodes())
        assert_edges_equal(XGS.edges(), GG.edges())
        GW = to_networkx_graph(dod, create_using=nx.Graph)
        assert_nodes_equal(XGS.nodes(), GW.nodes())
        assert_edges_equal(XGS.edges(), GW.edges())
        GI = nx.Graph(dod)
        assert_nodes_equal(XGS.nodes(), GI.nodes())
        assert_edges_equal(XGS.edges(), GI.edges())

        # Dict of lists
        # with self loops, OK
        dol = to_dict_of_lists(XGS)
        GG = from_dict_of_lists(dol, create_using=nx.Graph)
        # dict of lists throws away edge data so set it to none
        enone = [(u, v, {}) for (u, v, d) in XGS.edges(data=True)]
        assert_nodes_equal(sorted(XGS.nodes()), sorted(GG.nodes()))
        assert_edges_equal(enone, sorted(GG.edges(data=True)))
        GW = to_networkx_graph(dol, create_using=nx.Graph)
        assert_nodes_equal(sorted(XGS.nodes()), sorted(GW.nodes()))
        assert_edges_equal(enone, sorted(GW.edges(data=True)))
        GI = nx.Graph(dol)
        assert_nodes_equal(sorted(XGS.nodes()), sorted(GI.nodes()))
        assert_edges_equal(enone, sorted(GI.edges(data=True)))

        # Dict of dicts
        # with multiedges, OK
        dod = to_dict_of_dicts(XGM)
        GG = from_dict_of_dicts(dod,
                                create_using=nx.MultiGraph,
                                multigraph_input=True)
        assert_nodes_equal(sorted(XGM.nodes()), sorted(GG.nodes()))
        assert_edges_equal(sorted(XGM.edges()), sorted(GG.edges()))
        GW = to_networkx_graph(dod,
                               create_using=nx.MultiGraph,
                               multigraph_input=True)
        assert_nodes_equal(sorted(XGM.nodes()), sorted(GW.nodes()))
        assert_edges_equal(sorted(XGM.edges()), sorted(GW.edges()))
        GI = nx.MultiGraph(
            dod)  # convert can't tell whether to duplicate edges!
        assert_nodes_equal(sorted(XGM.nodes()), sorted(GI.nodes()))
        #assert_not_equal(sorted(XGM.edges()), sorted(GI.edges()))
        assert_false(sorted(XGM.edges()) == sorted(GI.edges()))
        # without multigraph_input the duplicated edge is lost
        GE = from_dict_of_dicts(dod,
                                create_using=nx.MultiGraph,
                                multigraph_input=False)
        assert_nodes_equal(sorted(XGM.nodes()), sorted(GE.nodes()))
        assert_not_equal(sorted(XGM.edges()), sorted(GE.edges()))
        # direct MultiGraph-to-MultiGraph copy preserves everything
        GI = nx.MultiGraph(XGM)
        assert_nodes_equal(sorted(XGM.nodes()), sorted(GI.nodes()))
        assert_edges_equal(sorted(XGM.edges()), sorted(GI.edges()))
        GM = nx.MultiGraph(G)
        assert_nodes_equal(sorted(GM.nodes()), sorted(G.nodes()))
        assert_edges_equal(sorted(GM.edges()), sorted(G.edges()))

        # Dict of lists
        # with multiedges, OK, but better write as DiGraph else you'll
        # get double edges
        dol = to_dict_of_lists(G)
        GG = from_dict_of_lists(dol, create_using=nx.MultiGraph)
        assert_nodes_equal(sorted(G.nodes()), sorted(GG.nodes()))
        assert_edges_equal(sorted(G.edges()), sorted(GG.edges()))
        GW = to_networkx_graph(dol, create_using=nx.MultiGraph)
        assert_nodes_equal(sorted(G.nodes()), sorted(GW.nodes()))
        assert_edges_equal(sorted(G.edges()), sorted(GW.edges()))
        GI = nx.MultiGraph(dol)
        assert_nodes_equal(sorted(G.nodes()), sorted(GI.nodes()))
        assert_edges_equal(sorted(G.edges()), sorted(GI.edges()))
Beispiel #51
0
def test_fasta__hash():
    # Equal records hash identically; changing any single field changes it.
    reference = FASTA("A", "B", "C")
    assert_equal(hash(reference), hash(FASTA("A", "B", "C")))
    assert_not_equal(hash(reference), hash(FASTA("B", "B", "C")))
    assert_not_equal(hash(reference), hash(FASTA("A", "C", "C")))
    assert_not_equal(hash(reference), hash(FASTA("A", "B", "D")))
Beispiel #52
0
def test_win32():
    # Platform guard: fails when run on Windows, where this test is invalid.
    nt.assert_not_equal(
        sys.platform, 'win32', "This test can't run under windows")
Beispiel #53
0
    def iter2backoff_batches(cls, iter, batch_chunksize_list, buffer_size):
        """Lazily process `iter` through a cascade of batch functions,
        escalating failed items to the next batch level, and yield results
        in input order.

        :param iter: input iterable of items to process.
        :param batch_chunksize_list: list of (f_batch, chunksize) pairs.
            Each f_batch takes a list of items and returns, per item, a
            (done, result) pair; items with done falsy are retried by the
            next pair in the list.  The last level must resolve every item
            (enforced by assert_false below).
        :param buffer_size: max number of in-flight items before the
            generator starts draining results.

        Internal state:
        - queue_list[j]: indices waiting for batch level j.
        - h_i2j: index -> current batch level (pending items).
        - h_i2out: index -> finished result, not yet yielded.
        - buffer_in: ring buffer of the raw inputs, size buffer_size + 1;
          indices i are ring positions, advanced modulo n + 1.
        """
        logger = FoxylibLogger.func_level2logger(cls.iter2backoff_batches,
                                                 logging.DEBUG)

        n = buffer_size
        m = len(batch_chunksize_list)

        queue_list = [[] for _ in range(m)]
        h_i2j = {}
        h_i2out = {}
        buffer_in = [None] * (n + 1)

        # f_batch_list = lmap(ig(0), batch_chunksize_list)
        # chunksize_list = lmap(ig(1), batch_chunksize_list)

        def i2next(i):
            # Advance a ring-buffer position (wraps at n + 1).
            return (i + 1) % (n + 1)

        def append2j(i, j):
            # Register item i as pending at batch level j.
            h_i2j[i] = j
            queue_list[j].append(i)

        def iy2out(i, y):
            # Item i finished with result y; move it from pending to done.
            h_i2out[i] = y
            h_i2j.pop(i)

        def i2j_new(i, j_old):
            # Item i failed at level j_old; escalate it to the next level.
            j_new = j_old + 1
            h_i2j[i] = j_new
            queue_list[j_new].append(i)

        def j2batch(j):
            # Run one chunk of level j's queue through its batch function.

            if not queue_list[j]:
                return

            f_batch, chunksize = batch_chunksize_list[j]

            # take at most `chunksize` indices off the front of the queue
            i_list = queue_list[j][:chunksize]
            queue_list[j] = queue_list[j][chunksize:]

            x_list = [buffer_in[i] for i in i_list]
            by_list = f_batch(x_list)

            if j == m - 1:
                # the final level must succeed for every item
                ix_list = [(i, buffer_in[i])
                           for i, (b, y) in zip_strict(i_list, by_list)
                           if not b]
                assert_false(ix_list)

            for i, (b, y) in zip_strict(i_list, by_list):
                if b:
                    iy2out(i, y)
                else:
                    i2j_new(i, j)

            logger.debug({
                "step": "j2batch",
                "j": j,
                "h_i2out": h_i2out,
                "h_i2j": h_i2j,
            })

        def i2yield(i):
            # Pop and return the finished result for ring position i.

            assert_in(i, h_i2out)
            assert_not_in(i, h_i2j)

            y = h_i2out.pop(i)

            logger.debug({"step": "i2yield", "i": i, "y": y})
            return y

        # i_head: ring position of the newest input; i_tail: position of
        # the next result to yield (input order is preserved).
        i_head = -1
        i_tail = -1
        for x in iter:
            i_head = i2next(i_head)
            logger.debug({"i_head": i_head})

            assert_not_in(i_head, h_i2j)
            buffer_in[i_head] = x

            append2j(i_head, 0)

            # keep filling until the in-flight window reaches buffer_size
            if len(h_i2j) + len(h_i2out) < n:
                continue

            i_tail = i2next(i_tail)
            logger.debug({
                "i_tail": i_tail,
                "h_i2j": h_i2j,
                "h_i2out": h_i2out,
            })

            # an in-flight item is either pending or done, never both
            assert_not_equal(i_tail in h_i2j, i_tail in h_i2out)

            if i_tail in h_i2out:
                yield i2yield(i_tail)
                continue

            # drive the tail item through the remaining batch levels
            # until it resolves
            j_tail = h_i2j[i_tail]

            for j in range(j_tail, m):
                j2batch(j)
                if i_tail not in h_i2out:  # not done
                    continue

                yield i2yield(i_tail)
                break

        # input exhausted: flush every level's remaining queue
        for j in range(m):
            while queue_list[j]:
                j2batch(j)

        assert_false(h_i2j)

        # drain the remaining finished results in input order
        while h_i2out:
            i_tail = i2next(i_tail)
            yield i2yield(i_tail)
Beispiel #54
0
def test_fetch_coords_power_2011():
    # The Power 2011 atlas ships 264 ROI coordinates and a description.
    power = atlas.fetch_coords_power_2011()
    assert_equal(len(power.rois), 264)
    assert_not_equal(power.description, '')
Beispiel #55
0
 def test_get_ssdeep(self):
     # get_ssdeep yields a fuzzy hash only when the optional pydeep
     # dependency is importable; otherwise it returns None.
     try:
         import pydeep  # noqa: F401 -- availability probe only
         assert_not_equal(self.file.get_ssdeep(), None)
     except ImportError:
         assert_equal(self.file.get_ssdeep(), None)
Beispiel #56
0
def test_fasta__inequality():
    # Records differing in any single field must compare unequal.
    reference = FASTA("A", "B", "C")
    assert_not_equal(reference, FASTA("A", "B", "D"))
    assert_not_equal(reference, FASTA("A", None, "C"))
    assert_not_equal(reference, FASTA("D", "B", "C"))
def test_collection_neq():
    # Collections over different element sets must not compare equal.
    full = Collection(["a", "b", "c"])
    partial = Collection(["a", "b"])
    assert_not_equal(full, partial)
Beispiel #58
0
 def test_eternity(self):
     """eternity orders above every date, -eternity below every date."""
     moment = d(2001, 1, 1)
     # any finite date sits strictly between -eternity and eternity
     assert_less(moment, eternity)
     assert_less_equal(moment, eternity)
     assert_greater(eternity, moment)
     assert_greater_equal(eternity, moment)
     assert_less(-eternity, moment)
     assert_less_equal(-eternity, moment)
     assert_greater(moment, -eternity)
     assert_greater_equal(moment, -eternity)
     # the two infinities order relative to each other
     assert_less(-eternity, eternity)
     assert_less_equal(-eternity, eternity)
     assert_greater(eternity, -eternity)
     assert_greater_equal(eternity, -eternity)
     # each infinity equals only itself
     assert_equal(eternity, eternity)
     assert_equal(-eternity, -eternity)
     assert_not_equal(moment, eternity)
     assert_not_equal(eternity, moment)
     assert_not_equal(moment, -eternity)
     assert_not_equal(-eternity, moment)
     assert_not_equal(-eternity, eternity)
     assert_not_equal(eternity, -eternity)
     # sorting places -eternity first and eternity last
     assert_equal([-eternity, moment, eternity],
                  sorted([moment, eternity, -eternity]))
     # negation is an involution
     assert_equal(eternity, --eternity)
     assert_equal(-eternity, ---eternity)
Beispiel #59
0
    def test_undertaker(self):
        """ UNDERTAKER (CORE): Test the undertaker.

        Creates expired datasets (lifetime -1), some with replication
        rules attached at creation and some with rules added afterwards,
        then runs the undertaker and checks that every file replica of
        the deleted datasets has been given a tombstone.
        """
        tmp_scope = 'mock'
        nbdatasets = 5
        nbfiles = 5

        # unlimited quota for jdoe on MOCK so rule creation cannot fail
        set_account_limit('jdoe', get_rse_id('MOCK'), -1)

        # datasets that will get their rule attached later via add_rules
        dsns1 = [{
            'name': 'dsn_%s' % generate_uuid(),
            'scope': tmp_scope,
            'type': 'DATASET',
            'lifetime': -1
        } for i in range(nbdatasets)]

        # datasets created with an inline replication rule
        dsns2 = [{
            'name':
            'dsn_%s' % generate_uuid(),
            'scope':
            tmp_scope,
            'type':
            'DATASET',
            'lifetime':
            -1,
            'rules': [{
                'account': 'jdoe',
                'copies': 1,
                'rse_expression': 'MOCK',
                'grouping': 'DATASET'
            }]
        } for i in range(nbdatasets)]

        add_dids(dids=dsns1 + dsns2, account='root')

        # attach nbfiles file replicas to every dataset on MOCK
        replicas = list()
        for dsn in dsns1 + dsns2:
            files = [{
                'scope': tmp_scope,
                'name': 'file_%s' % generate_uuid(),
                'bytes': 1,
                'adler32': '0cc737eb',
                'tombstone': datetime.utcnow() + timedelta(weeks=2),
                'meta': {
                    'events': 10
                }
            } for i in range(nbfiles)]
            attach_dids(scope=tmp_scope,
                        name=dsn['name'],
                        rse='MOCK',
                        dids=files,
                        account='root')
            replicas += files

        # give the first batch its rule after attachment
        add_rules(dids=dsns1,
                  rules=[{
                      'account': 'jdoe',
                      'copies': 1,
                      'rse_expression': 'MOCK',
                      'grouping': 'DATASET'
                  }])

        # run twice: first pass deletes DIDs, second pass handles follow-up
        undertaker(worker_number=1, total_workers=1, once=True)
        undertaker(worker_number=1, total_workers=1, once=True)

        # every replica of the expired datasets must now carry a tombstone
        for replica in replicas:
            assert_not_equal(
                get_replica(scope=replica['scope'],
                            name=replica['name'],
                            rse='MOCK')['tombstone'], None)
Beispiel #60
0
  def test_get_sample_data(cls):
    # Sampling the `id` column of Hue's desktop_document2 table should
    # succeed (status 0) and return a non-empty row set.
    sample = cls.indexer.get_sample_data({}, database='hue', table='desktop_document2', column='id')

    assert_equal(0, sample['status'], sample)
    assert_not_equal('', sample['rows'], sample)