Example #1
def merge_with(fn, *dicts):
    """ Merge dictionaries and apply function to combined values

    A key may occur in more than one dict, and all values mapped from the key
    will be passed to the function as a list, such as fn([val1, val2, ...]).

    >>> merge_with(sum, {1: 1, 2: 2}, {1: 10, 2: 20})
    {1: 11, 2: 22}

    >>> merge_with(first, {1: 1, 2: 2}, {2: 20, 3: 30})  # doctest: +SKIP
    {1: 1, 2: 2, 3: 30}

    See Also:
        merge
    """
    if len(dicts) == 1 and not isinstance(dicts[0], dict):
        dicts = dicts[0]

    result = dict()
    for d in dicts:
        for k, v in d.items():
            try:
                result[k].append(v)
            except KeyError:
                result[k] = [v]
    return dict((k, fn(v)) for k, v in result.items())
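The branch at the top of merge_with() is easy to miss: when a single non-dict argument is given, it is treated as an iterable of dicts. A minimal usage sketch (added for illustration; it assumes merge_with() as defined above):

d1 = {1: 1, 2: 2}
d2 = {1: 10, 2: 20}

print(merge_with(sum, d1, d2))    # varargs form         -> {1: 11, 2: 22}
print(merge_with(sum, [d1, d2]))  # single-iterable form -> {1: 11, 2: 22}
print(merge_with(max, d1, d2))    # any fn(list_of_values) works -> {1: 10, 2: 20}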
Example #2
    def __init__(self, session: Session, config, loop=None):
        self.logger = logging.getLogger(__name__)
        self.session = session
        self.config = config
        if loop is None:
            self._loop = asyncio.get_event_loop()
        else:
            self._loop = loop
        self._reader_task = None
        self._writer_task = None
        self._inflight_task = None
        self._reader_ready = asyncio.Event(loop=self._loop)
        self._writer_ready = asyncio.Event(loop=self._loop)
        self._inflight_ready = asyncio.Event(loop=self._loop)
        self._inflight_changed = asyncio.Condition(loop=self._loop)

        self._running = False

        self.session.local_address, self.session.local_port = self.session.writer.get_extra_info("sockname")

        self.incoming_queues = dict()
        for p in PacketType:
            self.incoming_queues[p] = asyncio.Queue()
        self.outgoing_queue = asyncio.Queue()
        self.inflight_messages = dict()
Example #3
 def test_bad_revparse(self):
     self.setupStep(
         bzr.Bzr(repourl='http://bzr.squid-cache.org/bzr/squid3/trunk',
                 mode='incremental'))
     self.expectCommands(
         ExpectShell(workdir='wkdir',
                     command=['bzr', '--version'])
         + 0,
         Expect('stat', dict(file='wkdir/.buildbot-patched',
                             logEnviron=True))
         + 1,
         Expect('stat', dict(file='wkdir/.bzr',
                             logEnviron=True))
         + 1,
         ExpectShell(workdir='wkdir',
                     command=['bzr', 'checkout',
                              'http://bzr.squid-cache.org/bzr/squid3/trunk', '.'])
         + 0,
         ExpectShell(workdir='wkdir',
                     command=['bzr', 'version-info', '--custom', "--template='{revno}"])
         + ExpectShell.log('stdio',
                           stdout='oiasdfj010laksjfd')
         + 0,
     )
     self.expectOutcome(result=FAILURE, status_text=["updating"])
     return self.runStep()
Example #4
    def _get_store_model(self, imsgstore):
        dct = {}

        props = dict(
            size=tags.PR_MESSAGE_SIZE_EXTENDED,
            guid=tags.PR_RECORD_KEY,
            lastLogon=tags.PR_LAST_LOGON_TIME,
            userEntryId=tags.PR_MAILBOX_OWNER_ENTRYID,
        )

        storemodel = MapiObject(imsgstore, props)
        dct.update(dict(
            lastLogon=storemodel.lastLogon,
            size=storemodel.size,
            zarafaId=binguid_to_hexstr(storemodel.guid),
        ))

        if storemodel.userEntryId:
            imailuser = self.session.OpenEntry(storemodel.userEntryId, None, 0)
            usermodel = MapiObject(imailuser, dict(userid=tags.PR_ACCOUNT_W))

            dct.update(dict(
                storeName=usermodel.userid,
            ))

        return Store(initial=dct)
Example #5
 def test_mode_full_timeout(self):
     self.setupStep(
         bzr.Bzr(repourl='http://bzr.squid-cache.org/bzr/squid3/trunk',
                 mode='full', method='fresh', timeout=1))
     self.expectCommands(
         ExpectShell(workdir='wkdir',
                     timeout=1,
                     command=['bzr', '--version'])
         + 0,
         Expect('stat', dict(file='wkdir/.buildbot-patched',
                             logEnviron=True))
         + 1,
         Expect('stat', dict(file='wkdir/.bzr',
                             logEnviron=True))
         + 0,
         ExpectShell(workdir='wkdir',
                     timeout=1,
                     command=['bzr', 'clean-tree', '--force'])
         + 0,
         ExpectShell(workdir='wkdir',
                     timeout=1,
                     command=['bzr', 'update'])
         + 0,
         ExpectShell(workdir='wkdir',
                     timeout=1,
                     command=['bzr', 'version-info', '--custom', "--template='{revno}"])
         + ExpectShell.log('stdio',
                           stdout='100')
         + 0,
     )
     self.expectOutcome(result=SUCCESS, status_text=["update"])
     self.expectProperty('got_revision', '100', 'Bzr')
     return self.runStep()
Example #6
def delete_plugin():
    """ Object delete handler """
    app=request.args(0)
    plugin = request.args(1)
    plugin_name='plugin_'+plugin
    if 'nodelete' in request.vars:
        redirect(URL('design',args=app))
    elif 'delete' in request.vars:
        try:
            for folder in ['models','views','controllers','static','modules']:
                path=os.path.join(apath(app,r=request),folder)
                for item in os.listdir(path):
                    if item.rsplit('.',1)[0] == plugin_name:
                        filename=os.path.join(path,item)
                        if os.path.isdir(filename):
                            shutil.rmtree(filename)
                        else:
                            os.unlink(filename)
            session.flash = T('plugin "%(plugin)s" deleted',
                              dict(plugin=plugin))
        except Exception:
            session.flash = T('unable to delete file plugin "%(plugin)s"',
                              dict(plugin=plugin))
        redirect(URL('design',args=request.args(0)))
    return dict(plugin=plugin)
Example #7
def create_filetree(path=None, depth=0, max_depth=0):

    tree = None

    if max_depth == 0 or depth < max_depth:
        if path is None:
            path = os.getcwd()

        tree = dict(name=os.path.basename(path), children=[])

        try:
            lst = os.listdir(path)
        except OSError:
            pass  # ignore errors
        else:
            for name in lst:
                fn = os.path.join(path, name)
                if (os.path.isdir(fn) and
                        re.match('^.*(Compiled)$', fn) is None):
                    child = create_filetree(fn, depth + 1, max_depth)
                    if child is not None:
                        tree['children'].append(child)
                elif re.match(r'^.*\.(m|def|txt|csv)$', fn) is not None:
                    tree['children'].append(dict(name=fn.replace(
                        os.getcwd() + os.path.sep, "")))

    return tree
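A small usage sketch (illustrative only, assuming the create_filetree() above plus the os/re imports it relies on); the function returns a nested name/children dict, which serializes cleanly to JSON:

import json
import os
import re  # create_filetree() itself relies on os and re being imported

tree = create_filetree(path=os.getcwd(), max_depth=3)
if tree is not None:
    print(json.dumps(tree, indent=2))  # e.g. {"name": "...", "children": [...]}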
Example #8
def user():
    if MULTI_USER_MODE:
        if not db(db.auth_user).count():
            auth.settings.registration_requires_approval = False
        return dict(form=auth())
    else:
        return dict(form=T("Disabled"))
Example #9
def site():
    """ Site handler """

    myversion = request.env.web2py_version

    # Shortcut to make the elif statements more legible
    file_or_appurl = 'file' in request.vars or 'appurl' in request.vars

    if DEMO_MODE:
        pass

    elif request.vars.filename and 'file' not in request.vars:
        # create a new application
        appname = cleanpath(request.vars.filename).replace('.', '_')
        if app_create(appname, request):
            if MULTI_USER_MODE:
                db.app.insert(name=appname,owner=auth.user.id)
            session.flash = T('new application "%s" created', appname)
            redirect(URL('design',args=appname))
        else:
            session.flash = \
                T('unable to create application "%s" (it may exist already)', request.vars.filename)
        redirect(URL(r=request))

    elif file_or_appurl and not request.vars.filename:
        # can't do anything without an app name
        msg = 'you must specify a name for the uploaded application'
        response.flash = T(msg)

    elif file_or_appurl and request.vars.filename:
        # fetch an application via URL or file upload
        f = None
        if request.vars.appurl != '':
            try:
                f = urllib.urlopen(request.vars.appurl)
            except Exception, e:
                session.flash = DIV(T('Unable to download app because:'),PRE(str(e)))
                redirect(URL(r=request))
            fname = request.vars.appurl
        elif request.vars.file != '':
            f = request.vars.file.file
            fname = request.vars.file.filename

        if f:
            appname = cleanpath(request.vars.filename).replace('.', '_')
            installed = app_install(appname, f, request, fname,
                                    overwrite=request.vars.overwrite_check)
        if f and installed:
            msg = 'application %(appname)s installed with md5sum: %(digest)s'
            session.flash = T(msg, dict(appname=appname,
                                        digest=md5_hash(installed)))
        elif f and request.vars.overwrite_check:
            msg = 'unable to install application "%(appname)s"'
            session.flash = T(msg, dict(appname=request.vars.filename))

        else:
            msg = 'unable to install application "%(appname)s"'
            session.flash = T(msg, dict(appname=request.vars.filename))

        redirect(URL(r=request))
Example #10
def main_interface():
    cur = g.db.execute('select id, provider, num_affected, end_time, flagged from outages where start_time<? and (end_time is null or end_time>? ) and hidden==0 order by start_time desc',
            [datetime.datetime.now(), datetime.datetime.now()])
    outages = [dict(id=row[0], provider=row[1],
        num_affected=row[2], end_time=row[3], flagged=row[4]) for row in cur.fetchall()]

    for outage in outages:
        cur = g.db.execute('select provider_ref_id from outage_texts where rowid==?', [outage['id']])
        outage['provider_ref'] = cur.fetchone()
        cur = g.db.execute('select dslusers_id from outages_dslusers_rel where outages_id==?', [outage['id']])

        dsluser_ids = '('
        for row in cur.fetchall():
            dsluser_ids += str(row[0]) + ', '
        dsluser_ids = dsluser_ids.rstrip().rstrip(',')
        dsluser_ids += ')'

        cur = g.db.execute('select id, asid, account_name, phone_number, user_name from dslusers where id IN ' + dsluser_ids)
        outage['users'] = [dict(id=row[0], asid=row[1], account_name=row[2], phone_number=row[3], user_name=row[4]) for row in cur.fetchall()]
        
        cur = g.db.execute('select count(*) from outages_dslusers_rel where outages_id==?', [outage['id']])
        outage['our_affected'] = cur.fetchone()[0]

    return render_template('base.html', queues=queues, notes=notes,
            outages=outages)
Example #11
    @defer.inlineCallbacks  # needed: the method yields RemoteCommand runs and returns via defer.returnValue
    def _sourcedirIsUpdatable(self):
        myFileWriter = StringFileWriter()
        args = {
                'workdir': self.build.path_module.join(self.workdir, 'CVS'),
                'writer': myFileWriter,
                'maxsize': None,
                'blocksize': 32*1024,
                }

        cmd = buildstep.RemoteCommand('uploadFile',
                dict(slavesrc='Root', **args),
                ignore_updates=True)
        yield self.runCommand(cmd)
        if cmd.rc is not None and cmd.rc != 0:
            defer.returnValue(False)
            return
        if myFileWriter.buffer.strip() != self.cvsroot:
            defer.returnValue(False)
            return

        myFileWriter.buffer = ""
        cmd = buildstep.RemoteCommand('uploadFile',
                dict(slavesrc='Repository', **args),
                ignore_updates=True)
        yield self.runCommand(cmd)
        if cmd.rc is not None and cmd.rc != 0:
            defer.returnValue(False)
            return
        if myFileWriter.buffer.strip() != self.cvsmodule:
            defer.returnValue(False)
            return

        defer.returnValue(True)
Example #12
    def testCleanUp(self):
        class TestableTest(unittest2.TestCase):
            def testNothing(self):
                pass

        test = TestableTest('testNothing')
        self.assertEqual(test._cleanups, [])

        cleanups = []

        def cleanup1(*args, **kwargs):
            cleanups.append((1, args, kwargs))

        def cleanup2(*args, **kwargs):
            cleanups.append((2, args, kwargs))

        test.addCleanup(cleanup1, 1, 2, 3, four='hello', five='goodbye')
        test.addCleanup(cleanup2)

        self.assertEqual(test._cleanups,
                         [(cleanup1, (1, 2, 3), dict(four='hello', five='goodbye')),
                          (cleanup2, (), {})])

        result = test.doCleanups()
        self.assertTrue(result)

        self.assertEqual(cleanups, [(2, (), {}), (1, (1, 2, 3), dict(four='hello', five='goodbye'))])
Example #13
def check(cmd, mf):
    m = mf.findNode('PyQt5')
    if m and not isinstance(m, MissingModule):
        try:
            # PyQt5 with sipconfig module, handled
            # by sip recipe
            import sipconfig
            return None

        except ImportError:
            pass

        # All imports are done from C code, hence not visible
        # for modulegraph
        # 1. Use of 'sip'
        # 2. Use of other modules, datafiles and C libraries
        #    in the PyQt5 package.
        mf.import_hook('sip', m)
        if sys.version_info[0] != 2:
            return dict(packages=['PyQt5'],
                        expected_missing_imports=set(['copy_reg', 'cStringIO', 'StringIO']))
        else:
            return dict(packages=['PyQt5'])

    return None
Example #14
def list_available(bitcoind):
    address_summary = dict()

    address_to_account = dict()
    for info in bitcoind.listreceivedbyaddress(0):
        address_to_account[info["address"]] = info["account"]

    unspent = bitcoind.listunspent(0)
    for output in unspent:
        # listunspent doesn't give addresses, so:
        rawtx = bitcoind.getrawtransaction(output['txid'], 1)
        vout = rawtx["vout"][output['vout']]
        pk = vout["scriptPubKey"]

        # This code only deals with ordinary pay-to-bitcoin-address
        # or pay-to-script-hash outputs right now; anything exotic is ignored.
        if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
            continue
        
        address = pk["addresses"][0]
        if address in address_summary:
            address_summary[address]["total"] += vout["value"]
            address_summary[address]["outputs"].append(output)
        else:
            address_summary[address] = {
                "total" : vout["value"],
                "outputs" : [output],
                "account" : address_to_account.get(address, "")
                }

    return address_summary
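An illustrative call site (not part of the original snippet): list_available() only needs an object exposing the listreceivedbyaddress, listunspent and getrawtransaction RPC methods, for example an AuthServiceProxy from the python-bitcoinrpc package (credentials and port below are placeholders):

from bitcoinrpc.authproxy import AuthServiceProxy  # assumed JSON-RPC client

bitcoind = AuthServiceProxy("http://rpcuser:rpcpassword@127.0.0.1:8332")
for address, info in list_available(bitcoind).items():
    print(address, info["account"], info["total"], len(info["outputs"]))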
Example #15
    def __init__(self, *args, **kwargs):
        """Constructor to resolve values for all Parameters.

        For example, the Task::

            class MyTask(luigi.Task):
                count = luigi.IntParameter()

        can be instantiated as ``MyTask(count=10)``.
        """
        params = self.get_params()
        param_values = self.get_param_values(params, args, kwargs)

        # Set all values on class instance
        for key, value in param_values:
            setattr(self, key, value)

        # Register args and kwargs as an attribute on the class. Might be useful
        self.param_args = tuple(value for key, value in param_values)
        self.param_kwargs = dict(param_values)

        # Build up task id
        task_id_parts = []
        param_objs = dict(params)
        for param_name, param_value in param_values:
            if param_objs[param_name].significant:
                task_id_parts.append('%s=%s' % (param_name, param_objs[param_name].serialize(param_value)))

        self.task_id = '%s(%s)' % (self.task_family, ', '.join(task_id_parts))
        self.__hash = hash(self.task_id)
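As a concrete illustration of the constructor above (a sketch, assuming the docstring's MyTask and luigi's default IntParameter serialization), the significant parameters end up in param_kwargs and in the human-readable task_id:

import luigi

class MyTask(luigi.Task):        # same hypothetical task as in the docstring
    count = luigi.IntParameter()

task = MyTask(count=10)
print(task.param_kwargs)         # {'count': 10}
print(task.task_id)              # with the constructor shown here: "MyTask(count=10)"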
Example #16
 def _compute_graph(self):
     stack=[self.begin]
     index=dict()
     rindex=dict()
     count=Counter()
     graph=dict()
     def enter(state):
         n = count.get()
         index[state] = n
         rindex[n] = state
     enter(self.begin)
     while stack:
         I=stack.pop()
         table=dict()
         for X in self.grammar.symbols:
             transition=self.transition(I,X)
             if transition:
                 table[X]=transition
                 if transition not in index:
                     enter(transition)
                     stack.append(transition)
         graph[I]=table
     self._graph=graph
     self._index=index
     self._rindex=rindex
Example #17
    def __compute_alternative_params(self):
        # Copied directly from skopt
        transformed_bounds = np.array(self.__opt.space.transformed_bounds)
        est = clone(self.__opt.base_estimator)

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            est.fit(self.__opt.space.transform(self.__opt.Xi), self.__opt.yi)

        X = self.__opt.space.transform(self.__opt.space.rvs(
            n_samples=self.__opt.n_points, random_state=self.__opt.rng))

        values = _gaussian_acquisition(X=X, model=est, y_opt=np.min(self.__opt.yi),
                                       acq_func='EI',
                                       acq_func_kwargs=dict(n_points=10000))

        print('original point ei: %s' % np.min(values))
        discount_width = .5
        values = self.__discount_leased_params(X, values, discount_width)
        while np.min(values) > -1e-5 and discount_width > 1e-2:
            discount_width *= .9
            values = _gaussian_acquisition(X=X, model=est, y_opt=np.min(self.__opt.yi),
                                           acq_func='EI',
                                           acq_func_kwargs=dict(n_points=10000))
            values = self.__discount_leased_params(X, values, discount_width)
        next_x = X[np.argmin(values)]
        print('new point ei: %s' % np.min(values))

        if not self.__opt.space.is_categorical:
            next_x = np.clip(next_x, transformed_bounds[:, 0], transformed_bounds[:, 1])

        return self.__opt.space.inverse_transform(next_x.reshape((1, -1)))[0]
Example #18
def edit_wiki_settings(node, auth, **kwargs):
    wiki_settings = node.get_addon('wiki')
    permissions = request.get_json().get('permission', None)

    if not wiki_settings:
        raise HTTPError(http.BAD_REQUEST, data=dict(
            message_short='Invalid request',
            message_long='Cannot change wiki settings without a wiki'
        ))

    if permissions == 'public':
        permissions = True
    elif permissions == 'private':
        permissions = False
    else:
        raise HTTPError(http.BAD_REQUEST, data=dict(
            message_short='Invalid request',
            message_long='Permissions flag used is incorrect.'
        ))

    try:
        wiki_settings.set_editing(permissions, auth, log=True)
    except NodeStateError as e:
        raise HTTPError(http.BAD_REQUEST, data=dict(
            message_short="Can't change privacy",
            message_long=e.message
        ))

    return {
        'status': 'success',
        'permissions': permissions,
    }
Example #19
 def test_height(self):
     prefix='thumb_'
     meta = dict(thumbnails=[dict(height=50, prefix=prefix, include=['*.jpg'])])
     self._test_generic_thumbnails(meta)
     for fn in IMAGES:
         im = Image.open(self._deployed_image(prefix, fn))
         assert im.size[1] == 50
Example #20
def test_failed_overwrite():
    table = Table.create('messages', schema=[
        HashKey('id'),
    ], throughput={
        'read': 7,
        'write': 3,
    })

    data1 = {'id': '123', 'data': '678'}
    table.put_item(data=data1)

    data2 = {'id': '123', 'data': '345'}
    table.put_item(data=data2, overwrite=True)

    data3 = {'id': '123', 'data': '812'}
    table.put_item.when.called_with(data=data3).should.throw(ConditionalCheckFailedException)

    returned_item = table.lookup('123')
    dict(returned_item).should.equal(data2)

    data4 = {'id': '124', 'data': 812}
    table.put_item(data=data4)

    returned_item = table.lookup('124')
    dict(returned_item).should.equal(data4)
Example #21
def test_item_add_and_describe_and_update():
    table = create_table()

    data = {
        'forum_name': 'LOLCat Forum',
        'Body': 'http://url_to_lolcat.gif',
        'SentBy': 'User A',
    }

    table.put_item(data=data)
    returned_item = table.get_item(forum_name="LOLCat Forum")
    returned_item.should_not.be.none

    dict(returned_item).should.equal({
        'forum_name': 'LOLCat Forum',
        'Body': 'http://url_to_lolcat.gif',
        'SentBy': 'User A',
    })

    returned_item['SentBy'] = 'User B'
    returned_item.save(overwrite=True)

    returned_item = table.get_item(
        forum_name='LOLCat Forum'
    )
    dict(returned_item).should.equal({
        'forum_name': 'LOLCat Forum',
        'Body': 'http://url_to_lolcat.gif',
        'SentBy': 'User B',
    })
Example #22
def when_i_post_put_to_url_with_params(context, name, password, method, url, params=SKIP, admin=False, content_type="application/x-www-form-urlencoded"):
    full_url = "%s%s" % (context.app_config.app_url_prefix,url) if admin is False else "%s%s" % (context.app_config.admin_url_prefix,url)
    h = httplib2.Http()
    h.add_credentials(name,password)
    # convert string params to dictionary
    form_params = {}
    if params is not SKIP:
        form_params = dict(urlparse.parse_qsl(params))

    # handle both
    content = params.encode("utf-8")

    if content_type == "application/x-www-form-urlencoded":
        content = urllib.urlencode(form_params)

    if method in {"PUT","POST","PATCH"}:
        context.http_headers["content-type"] = content_type

    # encode http headers
    context.http_headers = dict((k.encode('ascii') if isinstance(k, unicode) else k,
                                 v.encode('ascii') if isinstance(v, unicode) else v)
        for k,v in context.http_headers.items())

    context.resp, context.content = h.request(full_url.encode("utf-8"), method.encode("utf-8"), content, headers=context.http_headers)
    context.http_headers = {}
Example #23
    def close(self):
        """
        Shut down the UnitManager, and all umgr components.
        """

        # we do not cancel units at this point, in case any component or pilot
        # wants to continue to progress unit states, which should indeed be
        # independent from the umgr life cycle.

        if self._closed:
            return

        self._terminate.set()
        self.stop()

        self._rep.info('<<close unit manager')

        # we don't want any callback invocations during shutdown
        # FIXME: really?
        with self._cb_lock:
            self._callbacks = dict()
            for m in rpt.UMGR_METRICS:
                self._callbacks[m] = dict()

        self._log.info("Closed UnitManager %s." % self._uid)

        self._closed = True
        self._rep.ok('>>ok\n')
Example #24
 def test_existing_lcd_partial(self):
     commit1 = self._add_commit('Commit 1', ['file1'])
     commit2 = self._add_commit('Commit 2', ['file1', 'file2'], ['file2'], [commit1])
     commit3 = self._add_commit('Commit 3', ['file1', 'file2', 'file3'], ['file3'], [commit2])
     commit4 = self._add_commit('Commit 4', ['file1', 'file2', 'file3', 'file4'], ['file2', 'file4'], [commit3])
     prev_lcd = M.repository.LastCommit(
         path='',
         commit_id=commit3._id,
         entries=[
             dict(
                 name='file1',
                 commit_id=commit1._id),
             dict(
                 name='file2',
                 commit_id=commit2._id),
             dict(
                 name='file3',
                 commit_id=commit3._id),
         ],
     )
     session(prev_lcd).flush()
     lcd = M.repository.LastCommit.get(commit4.tree)
     self.assertEqual(self.repo._commits[lcd.commit_id].message, commit4.message)
     self.assertEqual(lcd.path, '')
     self.assertEqual(len(lcd.entries), 4)
     self.assertEqual(lcd.by_name['file1'], commit1._id)
     self.assertEqual(lcd.by_name['file2'], commit4._id)
     self.assertEqual(lcd.by_name['file3'], commit3._id)
     self.assertEqual(lcd.by_name['file4'], commit4._id)
Example #25
    def testGetSetProperties(self):
        self.addEngine(4)
        dikt = dict(a=5, b='asdf', c=True, d=None, e=list(range(5)))
        d= self.multiengine.set_properties(dikt)
        d.addCallback(lambda r: self.multiengine.get_properties())
        d.addCallback(lambda r: self.assertEquals(r, 4*[dikt]))
        d.addCallback(lambda r: self.multiengine.get_properties(('c',)))
        d.addCallback(lambda r: self.assertEquals(r, 4*[{'c': dikt['c']}]))
        d.addCallback(lambda r: self.multiengine.set_properties(dict(c=False)))
        d.addCallback(lambda r: self.multiengine.get_properties(('c', 'd')))
        d.addCallback(lambda r: self.assertEquals(r, 4*[dict(c=False, d=None)]))

        #Non-blocking
        d.addCallback(lambda r: self.multiengine.set_properties(dikt, block=False))
        d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
        d.addCallback(lambda r: self.multiengine.get_properties(block=False))
        d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
        d.addCallback(lambda r: self.assertEquals(r, 4*[dikt]))
        d.addCallback(lambda r: self.multiengine.get_properties(('c',), block=False))
        d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
        d.addCallback(lambda r: self.assertEquals(r, 4*[{'c': dikt['c']}]))
        d.addCallback(lambda r: self.multiengine.set_properties(dict(c=False), block=False))
        d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
        d.addCallback(lambda r: self.multiengine.get_properties(('c', 'd'), block=False))
        d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
        d.addCallback(lambda r: self.assertEquals(r, 4*[dict(c=False, d=None)]))
        return d
Example #26
    def save(self, request, datastream_rev=None, visualization_rev=None):
        if datastream_rev:
            lifecycle = VisualizationLifeCycleManager(user=request.user)
            visualization_rev = lifecycle.create(datastream_rev, language=request.user.language,  **self.cleaned_data)

            return dict(
                status='ok',
                revision_id=visualization_rev.id,
                messages=[ugettext('APP-VISUALIZATION-CREATEDSUCCESSFULLY-TEXT')]
            )
        elif visualization_rev:
            lifecycle = VisualizationLifeCycleManager(
                user=request.user,
                visualization_revision_id=visualization_rev['visualization_revision_id']
            )
            visualization_rev = lifecycle.edit(
                language=request.auth_manager.language,
                changed_fields=self.changed_data,
                **self.cleaned_data
            )
            return dict(
                status='ok',
                revision_id=visualization_rev.id,
                messages=[ugettext('APP-VISUALIZATION-CREATEDSUCCESSFULLY-TEXT')]
            )
Example #27
    def get_sortable_columns(self):
        """
            Returns a dictionary of the sortable columns. Key is a model
            field name and value is sort column (for example - attribute).

            If `column_sortable_list` is set, will use it. Otherwise, will call
            `scaffold_sortable_columns` to get them from the model.
        """
        self._sortable_joins = dict()

        if self.column_sortable_list is None:
            return self.scaffold_sortable_columns()
        else:
            result = dict()

            for c in self.column_sortable_list:
                if isinstance(c, tuple):
                    column, path = self._get_field_with_path(c[1])
                    column_name = c[0]
                elif isinstance(c, InstrumentedAttribute):
                    column, path = self._get_field_with_path(c)
                    column_name = str(c)
                else:
                    column, path = self._get_field_with_path(c)
                    column_name = c

                result[column_name] = column

                if path:
                    self._sortable_joins[column_name] = path

            return result
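For reference, a hypothetical view (not from the original source) showing the two most common forms the isinstance() branches above handle, a plain field name and a (label, attribute path) tuple; InstrumentedAttribute objects from the model are accepted as well:

from flask_admin.contrib.sqla import ModelView  # assumed Flask-Admin SQLAlchemy backend

class UserAdmin(ModelView):
    column_sortable_list = (
        'username',             # plain model field name
        ('team', 'team.name'),  # (label, attribute path) tuple
    )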
Example #28
    def test_ignore_multiple_matches(self):
        self.assertEqual(
            self.interpret_bootstrap_stderr([
                dict(
                    action_num=0,
                    node_id='i-b659f519',
                    path=('s3://bucket/tmp/logs/j-1EE0CL1O7FDXU/node/'
                          'i-b659f519/bootstrap-actions/1/stderr.gz'),
                ),
                dict(
                    action_num=0,
                    node_id='i-e647eb49',
                    path=('s3://bucket/tmp/logs/j-1EE0CL1O7FDXU/node/'
                          'i-e647eb49/bootstrap-actions/1/stderr.gz'),
                ),

            ]),
            dict(
                errors=[dict(
                    action_num=0,
                    node_id='i-b659f519',
                    task_error=dict(
                        message='BOOM!\n',
                        path=('s3://bucket/tmp/logs/j-1EE0CL1O7FDXU/node/'
                              'i-b659f519/bootstrap-actions/1/stderr.gz'),
                    ),
                )],
                partial=True,
            )
        )

        self.mock_cat_log.assert_called_once_with(
            self.mock_fs, ('s3://bucket/tmp/logs/j-1EE0CL1O7FDXU/node/'
                           'i-b659f519/bootstrap-actions/1/stderr.gz'))
Example #29
    def parse(self, basefile):
        # Find out possible skeleton entries by loading the entire
        # graph of resource references, and find resources that only
        # exist as objects.
        #
        # Note: if we used download_from_triplestore we know that this list
        #       is clean -- we could just iterate the graph w/o filtering
        g = Graph()
        self.log.info("Parsing %s" % basefile)
        g.parse(self.store.downloaded_path(basefile), format="nt")
        self.log.info("Compiling object set")
        # create a uri -> True dict mapping for fast membership tests
        objects = dict.fromkeys((str(o).split("#")[0] for (s, p, o) in g), True)
        self.log.info("Compiling subject set")
        subjects = dict.fromkeys((str(s).split("#")[0] for (s, p, o) in g), True)
        self.log.info("%s objects, %s subjects. Iterating through existing objects" %
                      (len(objects), len(subjects)))

        for o in objects:
            if not o.startswith(self.config.url):
                continue
            if '9999:999' in o:
                continue
            if o in subjects:
                continue
            for repo in otherrepos:
                skelbase = repo.basefile_from_uri(repo)
                if skelbase:
                    skel = repo.triples_from_uri(o)  # need to impl
                    with self.store.open_distilled(skelbase, "wb") as fp:
                        fp.write(skel.serialize(format="pretty-xml"))

                    self.log.info("Created skel for %s" % o)
Example #30
 def backup(self, context, instance, name, backup_type, rotation,
            extra_properties=None):
     self.extra_props_last_call = extra_properties
     props = dict(backup_type=backup_type,
                  rotation=rotation)
     props.update(extra_properties or {})
     return dict(id='123', status='ACTIVE', name=name, properties=props)
Example #31
def apply_event(state: Dict[str, Any],
                event: Dict[str, Any],
                user_profile: UserProfile,
                client_gravatar: bool,
                include_subscribers: bool) -> None:
    if event['type'] == "message":
        state['max_message_id'] = max(state['max_message_id'], event['message']['id'])
        if 'raw_unread_msgs' in state:
            apply_unread_message_event(
                user_profile,
                state['raw_unread_msgs'],
                event['message'],
                event['flags'],
            )

        if event['message']['type'] != "stream":
            if 'raw_recent_private_conversations' in state:
                # Handle maintaining the recent_private_conversations data structure.
                conversations = state['raw_recent_private_conversations']
                recipient_id = get_recent_conversations_recipient_id(
                    user_profile, event['message']['recipient_id'],
                    event['message']["sender_id"])

                if recipient_id not in conversations:
                    conversations[recipient_id] = dict(
                        user_ids=[user_dict['id'] for user_dict in
                                  event['message']['display_recipient'] if
                                  user_dict['id'] != user_profile.id]
                    )
                conversations[recipient_id]['max_message_id'] = event['message']['id']
            return

        # Below, we handle maintaining first_message_id.
        for sub_dict in state.get('subscriptions', []):
            if event['message']['stream_id'] == sub_dict['stream_id']:
                if sub_dict['first_message_id'] is None:
                    sub_dict['first_message_id'] = event['message']['id']
        for stream_dict in state.get('streams', []):
            if event['message']['stream_id'] == stream_dict['stream_id']:
                if stream_dict['first_message_id'] is None:
                    stream_dict['first_message_id'] = event['message']['id']

    elif event['type'] == "hotspots":
        state['hotspots'] = event['hotspots']
    elif event['type'] == "custom_profile_fields":
        state['custom_profile_fields'] = event['fields']
    elif event['type'] == "pointer":
        state['pointer'] = max(state['pointer'], event['pointer'])
    elif event['type'] == "realm_user":
        person = event['person']
        person_user_id = person['user_id']

        if event['op'] == "add":
            person = copy.deepcopy(person)
            if client_gravatar:
                if 'gravatar.com' in person['avatar_url']:
                    person['avatar_url'] = None
            person['is_active'] = True
            if not person['is_bot']:
                person['profile_data'] = {}
            state['raw_users'][person_user_id] = person
        elif event['op'] == "remove":
            state['raw_users'][person_user_id]['is_active'] = False
        elif event['op'] == 'update':
            is_me = (person_user_id == user_profile.id)

            if is_me:
                if ('avatar_url' in person and 'avatar_url' in state):
                    state['avatar_source'] = person['avatar_source']
                    state['avatar_url'] = person['avatar_url']
                    state['avatar_url_medium'] = person['avatar_url_medium']

                for field in ['is_admin', 'delivery_email', 'email', 'full_name']:
                    if field in person and field in state:
                        state[field] = person[field]

                # In the unlikely event that the current user
                # just changed to/from being an admin, we need
                # to add/remove the data on all bots in the
                # realm.  This is ugly and probably better
                # solved by removing the all-realm-bots data
                # given to admin users from this flow.
                if ('is_admin' in person and 'realm_bots' in state):
                    prev_state = state['raw_users'][user_profile.id]
                    was_admin = prev_state['is_admin']
                    now_admin = person['is_admin']

                    if was_admin and not now_admin:
                        state['realm_bots'] = []
                    if not was_admin and now_admin:
                        state['realm_bots'] = get_owned_bot_dicts(user_profile)

            if client_gravatar and 'avatar_url' in person:
                # Respect the client_gravatar setting in the `users` data.
                if 'gravatar.com' in person['avatar_url']:
                    person['avatar_url'] = None
                    person['avatar_url_medium'] = None

            if person_user_id in state['raw_users']:
                p = state['raw_users'][person_user_id]
                for field in p:
                    if field in person:
                        p[field] = person[field]
                    if 'custom_profile_field' in person:
                        custom_field_id = person['custom_profile_field']['id']
                        custom_field_new_value = person['custom_profile_field']['value']
                        if 'rendered_value' in person['custom_profile_field']:
                            p['profile_data'][custom_field_id] = {
                                'value': custom_field_new_value,
                                'rendered_value': person['custom_profile_field']['rendered_value']
                            }
                        else:
                            p['profile_data'][custom_field_id] = {
                                'value': custom_field_new_value
                            }

    elif event['type'] == 'realm_bot':
        if event['op'] == 'add':
            state['realm_bots'].append(event['bot'])

        if event['op'] == 'remove':
            email = event['bot']['email']
            for bot in state['realm_bots']:
                if bot['email'] == email:
                    bot['is_active'] = False

        if event['op'] == 'delete':
            state['realm_bots'] = [item for item
                                   in state['realm_bots'] if item['email'] != event['bot']['email']]

        if event['op'] == 'update':
            for bot in state['realm_bots']:
                if bot['email'] == event['bot']['email']:
                    if 'owner_id' in event['bot']:
                        bot['owner'] = get_user_profile_by_id(event['bot']['owner_id']).email
                    else:
                        bot.update(event['bot'])

    elif event['type'] == 'stream':
        if event['op'] == 'create':
            for stream in event['streams']:
                if not stream['invite_only']:
                    stream_data = copy.deepcopy(stream)
                    if include_subscribers:
                        stream_data['subscribers'] = []
                    stream_data['stream_weekly_traffic'] = None
                    stream_data['is_old_stream'] = False
                    stream_data['is_announcement_only'] = False
                    # Add stream to never_subscribed (if not invite_only)
                    state['never_subscribed'].append(stream_data)
                state['streams'].append(stream)
            state['streams'].sort(key=lambda elt: elt["name"])

        if event['op'] == 'delete':
            deleted_stream_ids = {stream['stream_id'] for stream in event['streams']}
            state['streams'] = [s for s in state['streams'] if s['stream_id'] not in deleted_stream_ids]
            state['never_subscribed'] = [stream for stream in state['never_subscribed'] if
                                         stream['stream_id'] not in deleted_stream_ids]

        if event['op'] == 'update':
            # For legacy reasons, we call stream data 'subscriptions' in
            # the state var here, for the benefit of the JS code.
            for obj in state['subscriptions']:
                if obj['name'].lower() == event['name'].lower():
                    obj[event['property']] = event['value']
                    if event['property'] == "description":
                        obj['rendered_description'] = event['rendered_description']
            # Also update the pure streams data
            for stream in state['streams']:
                if stream['name'].lower() == event['name'].lower():
                    prop = event['property']
                    if prop in stream:
                        stream[prop] = event['value']
                        if prop == 'description':
                            stream['rendered_description'] = event['rendered_description']
        elif event['op'] == "occupy":
            state['streams'] += event['streams']
        elif event['op'] == "vacate":
            stream_ids = [s["stream_id"] for s in event['streams']]
            state['streams'] = [s for s in state['streams'] if s["stream_id"] not in stream_ids]
    elif event['type'] == 'default_streams':
        state['realm_default_streams'] = event['default_streams']
    elif event['type'] == 'default_stream_groups':
        state['realm_default_stream_groups'] = event['default_stream_groups']
    elif event['type'] == 'realm':
        if event['op'] == "update":
            field = 'realm_' + event['property']
            state[field] = event['value']

            if event['property'] == 'plan_type':
                # Then there are some extra fields that also need to be set.
                state['plan_includes_wide_organization_logo'] = event['value'] != Realm.LIMITED
                state['realm_upload_quota'] = event['extra_data']['upload_quota']

            # Tricky interaction: Whether we can create streams can get changed here.
            if (field in ['realm_create_stream_policy',
                          'realm_waiting_period_threshold']) and 'can_create_streams' in state:
                state['can_create_streams'] = user_profile.can_create_streams()

            if (field in ['realm_invite_to_stream_policy',
                          'realm_waiting_period_threshold']) and 'can_subscribe_other_users' in state:
                state['can_subscribe_other_users'] = user_profile.can_subscribe_other_users()
        elif event['op'] == "update_dict":
            for key, value in event['data'].items():
                state['realm_' + key] = value
                # It's a bit messy, but this is where we need to
                # update the state for whether password authentication
                # is enabled on this server.
                if key == 'authentication_methods':
                    state['realm_password_auth_enabled'] = (value['Email'] or value['LDAP'])
                    state['realm_email_auth_enabled'] = value['Email']
    elif event['type'] == "subscription":
        if not include_subscribers and event['op'] in ['peer_add', 'peer_remove']:
            return

        if event['op'] in ["add"]:
            if not include_subscribers:
                # Avoid letting 'subscribers' entries end up in the list
                for i, sub in enumerate(event['subscriptions']):
                    event['subscriptions'][i] = copy.deepcopy(event['subscriptions'][i])
                    del event['subscriptions'][i]['subscribers']

        def name(sub: Dict[str, Any]) -> str:
            return sub['name'].lower()

        if event['op'] == "add":
            added_names = set(map(name, event["subscriptions"]))
            was_added = lambda s: name(s) in added_names

            # add the new subscriptions
            state['subscriptions'] += event['subscriptions']

            # remove them from unsubscribed if they had been there
            state['unsubscribed'] = [s for s in state['unsubscribed'] if not was_added(s)]

            # remove them from never_subscribed if they had been there
            state['never_subscribed'] = [s for s in state['never_subscribed'] if not was_added(s)]

        elif event['op'] == "remove":
            removed_names = set(map(name, event["subscriptions"]))
            was_removed = lambda s: name(s) in removed_names

            # Find the subs we are affecting.
            removed_subs = list(filter(was_removed, state['subscriptions']))

            # Remove our user from the subscribers of the removed subscriptions.
            if include_subscribers:
                for sub in removed_subs:
                    sub['subscribers'] = [id for id in sub['subscribers'] if id != user_profile.id]

            # We must effectively copy the removed subscriptions from subscriptions to
            # unsubscribe, since we only have the name in our data structure.
            state['unsubscribed'] += removed_subs

            # Now filter out the removed subscriptions from subscriptions.
            state['subscriptions'] = [s for s in state['subscriptions'] if not was_removed(s)]

        elif event['op'] == 'update':
            for sub in state['subscriptions']:
                if sub['name'].lower() == event['name'].lower():
                    sub[event['property']] = event['value']
        elif event['op'] == 'peer_add':
            user_id = event['user_id']
            for sub in state['subscriptions']:
                if (sub['name'] in event['subscriptions'] and
                        user_id not in sub['subscribers']):
                    sub['subscribers'].append(user_id)
            for sub in state['never_subscribed']:
                if (sub['name'] in event['subscriptions'] and
                        user_id not in sub['subscribers']):
                    sub['subscribers'].append(user_id)
        elif event['op'] == 'peer_remove':
            user_id = event['user_id']
            for sub in state['subscriptions']:
                if (sub['name'] in event['subscriptions'] and
                        user_id in sub['subscribers']):
                    sub['subscribers'].remove(user_id)
    elif event['type'] == "presence":
        # TODO: Add user_id to presence update events / state format!
        presence_user_profile = get_user(event['email'], user_profile.realm)
        state['presences'][event['email']] = UserPresence.get_status_dict_by_user(
            presence_user_profile)[event['email']]
    elif event['type'] == "update_message":
        # We don't return messages in /register, so we don't need to
        # do anything for content updates, but we may need to update
        # the unread_msgs data if the topic of an unread message changed.
        if TOPIC_NAME in event:
            stream_dict = state['raw_unread_msgs']['stream_dict']
            topic = event[TOPIC_NAME]
            for message_id in event['message_ids']:
                if message_id in stream_dict:
                    stream_dict[message_id]['topic'] = topic
    elif event['type'] == "delete_message":
        max_message = Message.objects.filter(
            usermessage__user_profile=user_profile).order_by('-id').first()
        if max_message:
            state['max_message_id'] = max_message.id
        else:
            state['max_message_id'] = -1

        if 'raw_unread_msgs' in state:
            remove_id = event['message_id']
            remove_message_id_from_unread_mgs(state['raw_unread_msgs'], remove_id)

        # The remainder of this block is about maintaining recent_private_conversations
        if 'raw_recent_private_conversations' not in state or event['message_type'] != 'private':
            return

        recipient_id = get_recent_conversations_recipient_id(user_profile, event['recipient_id'],
                                                             event['sender_id'])

        # Ideally, we'd have test coverage for these two blocks.  To
        # do that, we'll need a test where we delete not-the-latest
        # messages or delete a private message not in
        # recent_private_conversations.
        if recipient_id not in state['raw_recent_private_conversations']:  # nocoverage
            return

        old_max_message_id = state['raw_recent_private_conversations'][recipient_id]['max_message_id']
        if old_max_message_id != event['message_id']:  # nocoverage
            return

        # OK, we just deleted what had been the max_message_id for
        # this recent conversation; we need to recompute that value
        # from scratch.  Definitely don't need to re-query everything,
        # but this case is likely rare enough that it's reasonable to do so.
        state['raw_recent_private_conversations'] = \
            get_recent_private_conversations(user_profile)
    elif event['type'] == "reaction":
        # The client will get the message with the reactions directly
        pass
    elif event['type'] == "submessage":
        # The client will get submessages with their messages
        pass
    elif event['type'] == 'typing':
        # Typing notification events are transient and thus ignored
        pass
    elif event['type'] == "attachment":
        # Attachment events are just for updating the "uploads" UI;
        # they are not sent directly.
        pass
    elif event['type'] == "update_message_flags":
        # We don't return messages in `/register`, so most flags we
        # can ignore, but we do need to update the unread_msgs data if
        # unread state is changed.
        if 'raw_unread_msgs' in state and event['flag'] == 'read' and event['operation'] == 'add':
            for remove_id in event['messages']:
                remove_message_id_from_unread_mgs(state['raw_unread_msgs'], remove_id)
        if event['flag'] == 'starred' and event['operation'] == 'add':
            state['starred_messages'] += event['messages']
        if event['flag'] == 'starred' and event['operation'] == 'remove':
            state['starred_messages'] = [message for message in state['starred_messages']
                                         if not (message in event['messages'])]
    elif event['type'] == "realm_domains":
        if event['op'] == 'add':
            state['realm_domains'].append(event['realm_domain'])
        elif event['op'] == 'change':
            for realm_domain in state['realm_domains']:
                if realm_domain['domain'] == event['realm_domain']['domain']:
                    realm_domain['allow_subdomains'] = event['realm_domain']['allow_subdomains']
        elif event['op'] == 'remove':
            state['realm_domains'] = [realm_domain for realm_domain in state['realm_domains']
                                      if realm_domain['domain'] != event['domain']]
    elif event['type'] == "realm_emoji":
        state['realm_emoji'] = event['realm_emoji']
    elif event['type'] == 'realm_export':
        # These realm export events are only available to
        # administrators, and aren't included in page_params.
        pass
    elif event['type'] == "alert_words":
        state['alert_words'] = event['alert_words']
    elif event['type'] == "muted_topics":
        state['muted_topics'] = event["muted_topics"]
    elif event['type'] == "realm_filters":
        state['realm_filters'] = event["realm_filters"]
    elif event['type'] == "update_display_settings":
        assert event['setting_name'] in UserProfile.property_types
        state[event['setting_name']] = event['setting']
    elif event['type'] == "update_global_notifications":
        assert event['notification_name'] in UserProfile.notification_setting_types
        state[event['notification_name']] = event['setting']
    elif event['type'] == "invites_changed":
        pass
    elif event['type'] == "user_group":
        if event['op'] == 'add':
            state['realm_user_groups'].append(event['group'])
            state['realm_user_groups'].sort(key=lambda group: group['id'])
        elif event['op'] == 'update':
            for user_group in state['realm_user_groups']:
                if user_group['id'] == event['group_id']:
                    user_group.update(event['data'])
        elif event['op'] == 'add_members':
            for user_group in state['realm_user_groups']:
                if user_group['id'] == event['group_id']:
                    user_group['members'].extend(event['user_ids'])
                    user_group['members'].sort()
        elif event['op'] == 'remove_members':
            for user_group in state['realm_user_groups']:
                if user_group['id'] == event['group_id']:
                    members = set(user_group['members'])
                    user_group['members'] = list(members - set(event['user_ids']))
                    user_group['members'].sort()
        elif event['op'] == 'remove':
            state['realm_user_groups'] = [ug for ug in state['realm_user_groups']
                                          if ug['id'] != event['group_id']]
    elif event['type'] == 'user_status':
        user_id = event['user_id']
        user_status = state['user_status']
        away = event.get('away')
        status_text = event.get('status_text')

        if user_id not in user_status:
            user_status[user_id] = dict()

        if away is not None:
            if away:
                user_status[user_id]['away'] = True
            else:
                user_status[user_id].pop('away', None)

        if status_text is not None:
            if status_text == '':
                user_status[user_id].pop('status_text', None)
            else:
                user_status[user_id]['status_text'] = status_text

        if not user_status[user_id]:
            user_status.pop(user_id, None)

        state['user_status'] = user_status
    else:
        raise AssertionError("Unexpected event type %s" % (event['type'],))
Example #32
def _wrap_send(func, instance, args, kwargs):
    """Trace the `Session.send` instance method"""
    # TODO[manu]: we already offer a way to provide the Global Tracer
    # and is ddtrace.tracer; it's used only inside our tests and can
    # be easily changed by providing a TracingTestCase that sets common
    # tracing functionalities.
    tracer = getattr(instance, 'datadog_tracer', ddtrace.tracer)

    # skip if tracing is not enabled
    if not tracer.enabled:
        return func(*args, **kwargs)

    request = kwargs.get('request') or args[0]
    if not request:
        return func(*args, **kwargs)

    # sanitize url of query
    parsed_uri = parse.urlparse(request.url)
    hostname = parsed_uri.hostname
    if parsed_uri.port:
        hostname = '{}:{}'.format(hostname, parsed_uri.port)
    sanitized_url = parse.urlunparse((
        parsed_uri.scheme,
        parsed_uri.netloc,
        parsed_uri.path,
        parsed_uri.params,
        None,  # drop parsed_uri.query
        parsed_uri.fragment))

    with tracer.trace('requests.request', span_type=SpanTypes.HTTP) as span:
        # update the span service name before doing any action
        span.service = _extract_service_name(instance, span, hostname=hostname)

        # Configure trace search sample rate
        # DEV: analytics enabled on per-session basis
        cfg = config.get_from(instance)
        analytics_enabled = cfg.get('analytics_enabled')
        if analytics_enabled:
            span.set_tag(ANALYTICS_SAMPLE_RATE_KEY,
                         cfg.get('analytics_sample_rate', True))

        # propagate distributed tracing headers
        if cfg.get('distributed_tracing'):
            propagator = HTTPPropagator()
            propagator.inject(span.context, request.headers)

        # Storing request headers in the span
        store_request_headers(request.headers, span, config.requests)

        response = None
        try:
            response = func(*args, **kwargs)

            # Storing response headers in the span. Note that response.headers is not a dict, but an iterable
            # requests custom structure, that we convert to a dict
            if hasattr(response, 'headers'):
                store_response_headers(dict(response.headers), span,
                                       config.requests)
            return response
        finally:
            try:
                span.set_tag(http.METHOD, request.method.upper())
                span.set_tag(http.URL, sanitized_url)
                if config.requests.trace_query_string:
                    span.set_tag(http.QUERY_STRING, parsed_uri.query)
                if response is not None:
                    span.set_tag(http.STATUS_CODE, response.status_code)
                    # `span.error` must be an integer
                    span.error = int(500 <= response.status_code)
                    # Storing response headers in the span.
                    # Note that response.headers is not a dict, but an iterable
                    # requests custom structure, that we convert to a dict
                    response_headers = dict(getattr(response, 'headers', {}))
                    store_response_headers(response_headers, span,
                                           config.requests)
            except Exception:
                log.debug('requests: error adding tags', exc_info=True)
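# A hedged sketch of how a wrapper with this (func, instance, args, kwargs)
# signature is typically attached with wrapt; illustrative only, not
# necessarily the library's own patch code.
import wrapt

def patch_requests_send():
    # After patching, every requests.Session.send call goes through _wrap_send.
    wrapt.wrap_function_wrapper('requests', 'Session.send', _wrap_send)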
_KEYRANGE.fields_by_name['start_open'].containing_oneof = _KEYRANGE.oneofs_by_name['start_key_type']
_KEYRANGE.oneofs_by_name['end_key_type'].fields.append(
  _KEYRANGE.fields_by_name['end_closed'])
_KEYRANGE.fields_by_name['end_closed'].containing_oneof = _KEYRANGE.oneofs_by_name['end_key_type']
_KEYRANGE.oneofs_by_name['end_key_type'].fields.append(
  _KEYRANGE.fields_by_name['end_open'])
_KEYRANGE.fields_by_name['end_open'].containing_oneof = _KEYRANGE.oneofs_by_name['end_key_type']
_KEYSET.fields_by_name['keys'].message_type = google_dot_protobuf_dot_struct__pb2._LISTVALUE
_KEYSET.fields_by_name['ranges'].message_type = _KEYRANGE
DESCRIPTOR.message_types_by_name['KeyRange'] = _KEYRANGE
DESCRIPTOR.message_types_by_name['KeySet'] = _KEYSET
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

KeyRange = _reflection.GeneratedProtocolMessageType('KeyRange', (_message.Message,), dict(
  DESCRIPTOR = _KEYRANGE,
  __module__ = 'google.spanner.v1.keys_pb2'
  # @@protoc_insertion_point(class_scope:google.spanner.v1.KeyRange)
  ))
_sym_db.RegisterMessage(KeyRange)

KeySet = _reflection.GeneratedProtocolMessageType('KeySet', (_message.Message,), dict(
  DESCRIPTOR = _KEYSET,
  __module__ = 'google.spanner.v1.keys_pb2'
  # @@protoc_insertion_point(class_scope:google.spanner.v1.KeySet)
  ))
_sym_db.RegisterMessage(KeySet)


DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\025com.google.spanner.v1B\tKeysProtoP\001Z8google.golang.org/genproto/googleapis/spanner/v1;spanner\252\002\027Google.Cloud.Spanner.V1'))
    'LTU', 'LUX', 'LVA', 'MAC', 'MAR', 'MCO', 'MDA', 'MDG', 'MDV', 'MEX', 'MHL',
    'MKD', 'MLI', 'MLT', 'MMR', 'MNG', 'MNP', 'MOZ', 'MRT', 'MSR', 'MTQ', 'MUS',
    'MWI', 'MYS', 'MYT', 'NAM', 'NCL', 'NER', 'NFK', 'NGA', 'NIC', 'NIU', 'NLD',
    'NOR', 'NPL', 'NRU', 'NZL', 'OMN', 'PAK', 'PAN', 'PCN', 'PER', 'PHL', 'PLW',
    'PNG', 'POL', 'PRI', 'PRK', 'PRT', 'PRY', 'PSE', 'PYF', 'QAT', 'REU', 'ROU',
    'RUS', 'RWA', 'SAU', 'SCG', 'SDN', 'SEN', 'SGP', 'SHN', 'SJM', 'SLB', 'SLE',
    'SLV', 'SMR', 'SOM', 'SPM', 'STP', 'SUR', 'SVK', 'SVN', 'SWE', 'SWZ', 'SYC',
    'SYR', 'TCA', 'TCD', 'TGO', 'THA', 'TJK', 'TKL', 'TKM', 'TLS', 'TON', 'TTO',
    'TUN', 'TUR', 'TUV', 'TWN', 'TZA', 'UGA', 'UKR', 'URY', 'USA', 'UZB', 'VCT',
    'VEN', 'VGB', 'VIR', 'VNM', 'VUT', 'WLF', 'WSM', 'YEM', 'ZAF', 'ZMB', 'ZWE',
]
"""ISO 3166 alpha-3 codes of countries used in ISIMIP_GPWV3_NATID_150AS"""

NONISO_REGIONS = [
    # Dummy region for numeric 0 (or empty string), sometimes used for oceans
    dict(name="", alpha_2="", alpha_3="", numeric="000"),
    dict(name="Akrotiri", alpha_2="XA", alpha_3="XXA", numeric="901"),
    dict(name="Baikonur", alpha_2="XB", alpha_3="XXB", numeric="902"),
    dict(name="Bajo Nuevo Bank", alpha_2="XJ", alpha_3="XXJ", numeric="903"),
    dict(name="Bir Tawil", alpha_2="XQ", alpha_3="XXQ", numeric="919"),
    dict(name="Brazilian I.", alpha_2="XE", alpha_3="XXE", numeric="909"),
    dict(name="Clipperton I.", alpha_2="XC", alpha_3="XXC", numeric="904"),
    dict(name="Coral Sea Is.", alpha_2="XO", alpha_3="XXO", numeric="905"),
    dict(name="Cyprus U.N. Buffer Zone", alpha_2="XU", alpha_3="XXU", numeric="906"),
    dict(name="Dhekelia", alpha_2="XD", alpha_3="XXD", numeric="907"),
    dict(name="Indian Ocean Ter.", alpha_2="XI", alpha_3="XXI", numeric="908"),
    # For Kosovo, we follow the iso3166 package and the statistical office of Canada:
    # https://www.statcan.gc.ca/eng/subjects/standard/sccai/2011/scountry-desc
    dict(name="Kosovo", alpha_2="XK", alpha_3="XKO", numeric="983"),
    dict(name="N. Cyprus", alpha_2="XY", alpha_3="XXY", numeric="910"),
    dict(name="Scarborough Reef", alpha_2="XS", alpha_3="XXS", numeric="912"),
Example #35
0
def import_documents(request):
    def is_reserved_directory(doc):
        return doc['fields']['type'] == 'directory' and doc['fields'][
            'name'] in (Document2.HOME_DIR, Document2.TRASH_DIR)

    try:
        if request.FILES.get('documents'):
            documents = request.FILES['documents'].read()
        else:
            documents = json.loads(request.POST.get('documents'))

        documents = json.loads(documents)
    except ValueError as e:
        raise PopupException(
            _('Failed to import documents, the file does not contain valid JSON.'
              ))

    # Validate documents
    if not _is_import_valid(documents):
        raise PopupException(
            _('Failed to import documents, the file does not contain the expected JSON schema for Hue documents.'
              ))

    docs = []

    uuids_map = dict((doc['fields']['uuid'], None) for doc in documents
                     if not is_reserved_directory(doc))

    for doc in documents:
        # Filter docs to import, ignoring reserved directories (home and Trash) and history docs
        if not is_reserved_directory(doc):
            # Remove any deprecated fields
            if 'tags' in doc['fields']:
                doc['fields'].pop('tags')

            # If doc is not owned by current user, make a copy of the document with current user as owner
            if doc['fields']['owner'][0] != request.user.username:
                doc = _copy_document_with_owner(doc, request.user, uuids_map)
            else:  # Update existing doc or create new
                doc = _create_or_update_document_with_owner(
                    doc, request.user, uuids_map)

            # For oozie docs replace dependent uuids with the newly created ones
            if doc['fields']['type'].startswith('oozie-'):
                doc = _update_imported_oozie_document(doc, uuids_map)

            # If the doc contains any history dependencies, ignore them
            # NOTE: this assumes that each dependency is exported as an array using the natural PK [uuid, version, is_history]
            deps_minus_history = [
                dep for dep in doc['fields'].get('dependencies', [])
                if len(dep) >= 3 and not dep[2]
            ]
            doc['fields']['dependencies'] = deps_minus_history

            # Replace illegal characters
            if '/' in doc['fields']['name']:
                new_name = doc['fields']['name'].replace('/', '-')
                LOG.warn(
                    "Found illegal slash in document named: %s, renaming to: %s."
                    % (doc['fields']['name'], new_name))
                doc['fields']['name'] = new_name

            # Set last modified date to now
            doc['fields']['last_modified'] = datetime.now().replace(
                microsecond=0).isoformat()
            docs.append(doc)

    f = tempfile.NamedTemporaryFile(mode='w+', suffix='.json')
    f.write(json.dumps(docs))
    f.flush()

    stdout = string_io()
    try:
        with transaction.atomic(
        ):  # We wrap both commands to commit loaddata & sync
            management.call_command('loaddata',
                                    f.name,
                                    verbosity=3,
                                    traceback=True,
                                    stdout=stdout)
            Document.objects.sync()

        if request.POST.get('redirect'):
            return redirect(request.POST.get('redirect'))
        else:
            return JsonResponse({
                'status':
                0,
                'message':
                stdout.getvalue(),
                'count':
                len(documents),
                'created_count':
                len([doc for doc in documents if doc['pk'] is None]),
                'updated_count':
                len([doc for doc in documents if doc['pk'] is not None]),
                'username':
                request.user.username,
                'documents': [
                    dict([('name', doc['fields']['name']),
                          ('uuid', doc['fields']['uuid']),
                          ('type', doc['fields']['type']),
                          ('owner', doc['fields']['owner'][0])])
                    for doc in docs
                ]
            })
    except Exception as e:
        LOG.error('Failed to run loaddata command in import_documents:\n %s' %
                  stdout.getvalue())
        return JsonResponse({'status': -1, 'message': smart_str(e)})
    finally:
        stdout.close()
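# A hedged illustration (not the authoritative Hue export schema) of the kind
# of per-document dicts the view above expects after json.loads, based only on
# the fields it accesses; all values here are hypothetical.
example_documents = [
    {
        'pk': None,  # None -> counted as "created" rather than "updated"
        'fields': {
            'uuid': '1111-2222',
            'name': 'my-query',
            'type': 'query-hive',
            'owner': ['some_user'],
            'dependencies': [],
            'last_modified': '2020-01-01T00:00:00',
        },
    },
]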
Example #36
0
def hopcroft_karp_matching(G, top_nodes=None):
    """Returns the maximum cardinality matching of the bipartite graph `G`.

    Parameters
    ----------
    G : NetworkX graph

      Undirected bipartite graph

    top_nodes : container

      Container with all nodes in one bipartite node set. If not supplied
      it will be computed. But if more than one solution exists an exception
      will be raised.

    Returns
    -------
    matches : dictionary

      The matching is returned as a dictionary, `matches`, such that
      ``matches[v] == w`` if node `v` is matched to node `w`. Unmatched
      nodes do not occur as a key in `matches`.

    Raises
    ------
    AmbiguousSolution : Exception

      Raised if the input bipartite graph is disconnected and no container
      with all nodes in one bipartite set is provided. When determining
      the nodes in each bipartite set more than one valid solution is
      possible if the input graph is disconnected.

    Notes
    -----
    This function is implemented with the `Hopcroft--Karp matching algorithm
    <https://en.wikipedia.org/wiki/Hopcroft%E2%80%93Karp_algorithm>`_ for
    bipartite graphs.

    See :mod:`bipartite documentation <networkx.algorithms.bipartite>`
    for further details on how bipartite graphs are handled in NetworkX.

    See Also
    --------

    eppstein_matching

    References
    ----------
    .. [1] John E. Hopcroft and Richard M. Karp. "An n^{5 / 2} Algorithm for
       Maximum Matchings in Bipartite Graphs" In: **SIAM Journal on Computing**
       2.4 (1973), pp. 225--231. <https://doi.org/10.1137/0202019>.

    """
    # First we define some auxiliary search functions.
    #
    # If you are a human reading these auxiliary search functions, the "global"
    # variables `leftmatches`, `rightmatches`, `distances`, etc. are defined
    # below the functions, so that they are initialized close to the initial
    # invocation of the search functions.
    def breadth_first_search():
        for v in left:
            if leftmatches[v] is None:
                distances[v] = 0
                queue.append(v)
            else:
                distances[v] = INFINITY
        distances[None] = INFINITY
        while queue:
            v = queue.popleft()
            if distances[v] < distances[None]:
                for u in G[v]:
                    if distances[rightmatches[u]] is INFINITY:
                        distances[rightmatches[u]] = distances[v] + 1
                        queue.append(rightmatches[u])
        return distances[None] is not INFINITY

    def depth_first_search(v):
        if v is not None:
            for u in G[v]:
                if distances[rightmatches[u]] == distances[v] + 1:
                    if depth_first_search(rightmatches[u]):
                        rightmatches[u] = v
                        leftmatches[v] = u
                        return True
            distances[v] = INFINITY
            return False
        return True

    # Initialize the "global" variables that maintain state during the search.
    left, right = bipartite_sets(G, top_nodes)
    leftmatches = {v: None for v in left}
    rightmatches = {v: None for v in right}
    distances = {}
    queue = collections.deque()

    # Implementation note: this counter is incremented as pairs are matched but
    # it is currently not used elsewhere in the computation.
    num_matched_pairs = 0
    while breadth_first_search():
        for v in left:
            if leftmatches[v] is None:
                if depth_first_search(v):
                    num_matched_pairs += 1

    # Strip the entries matched to `None`.
    leftmatches = {k: v for k, v in leftmatches.items() if v is not None}
    rightmatches = {k: v for k, v in rightmatches.items() if v is not None}

    # At this point, the left matches and the right matches are inverses of one
    # another. In other words,
    #
    #     leftmatches == {v: k for k, v in rightmatches.items()}
    #
    # Finally, we combine both the left matches and right matches.
    return dict(itertools.chain(leftmatches.items(), rightmatches.items()))
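# A hedged usage sketch on a tiny bipartite graph (assumes networkx is
# installed; the exact matching returned may differ but has the same size).
import networkx as nx

G = nx.Graph([(1, 'a'), (1, 'b'), (2, 'b')])
matching = nx.bipartite.hopcroft_karp_matching(G, top_nodes={1, 2})
print(matching)  # e.g. {1: 'a', 2: 'b', 'a': 1, 'b': 2}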
Example #37
0
    def select(self,
               location=None,
               channel=None,
               time=None,
               starttime=None,
               endtime=None,
               sampling_rate=None,
               minlatitude=None,
               maxlatitude=None,
               minlongitude=None,
               maxlongitude=None,
               latitude=None,
               longitude=None,
               minradius=None,
               maxradius=None):
        r"""
        Returns the :class:`Station` object with only the
        :class:`~obspy.core.inventory.channel.Channel`\ s that match the given
        criteria (e.g. all channels with ``channel="EHZ"``).

        .. warning::
            The returned object is based on a shallow copy of the original
            object. That means that modifying any mutable child elements will
            also modify the original object
            (see https://docs.python.org/2/library/copy.html).
            Use :meth:`copy()` afterwards to make a new copy of the data in
            memory.

        .. rubric:: Example

        >>> from obspy import read_inventory, UTCDateTime
        >>> sta = read_inventory()[0][0]
        >>> t = UTCDateTime(2008, 7, 1, 12)
        >>> sta = sta.select(channel="[LB]HZ", time=t)
        >>> print(sta)  # doctest: +NORMALIZE_WHITESPACE
        Station FUR (Fuerstenfeldbruck, Bavaria, GR-Net)
            Station Code: FUR
            Channel Count: None/None (Selected/Total)
            2006-12-16T00:00:00.000000Z -
            Access: None
            Latitude: 48.16, Longitude: 11.28, Elevation: 565.0 m
            Available Channels:
                FUR..BHZ, FUR..LHZ

        The `location` and `channel` selection criteria may also contain UNIX
        style wildcards (e.g. ``*``, ``?``, ...; see
        :func:`~fnmatch.fnmatch`).

        :type location: str
        :param location: Potentially wildcarded location code. If not given,
            all location codes will be accepted.
        :type channel: str
        :param channel: Potentially wildcarded channel code. If not given,
            all channel codes will be accepted.
        :type time: :class:`~obspy.core.utcdatetime.UTCDateTime`
        :param time: Only include channels active at given point in time.
        :type starttime: :class:`~obspy.core.utcdatetime.UTCDateTime`
        :param starttime: Only include channels active at or after given point
            in time (i.e. channels ending before given time will not be shown).
        :type endtime: :class:`~obspy.core.utcdatetime.UTCDateTime`
        :param endtime: Only include channels active before or at given point
            in time (i.e. channels starting after given time will not be
            shown).
        :type sampling_rate: float
        :param sampling_rate: Only include channels whose sampling rate
            matches the given sampling rate, in Hz (within absolute tolerance
            of 1E-8 Hz and relative tolerance of 1E-5)
        :type minlatitude: float
        :param minlatitude: Only include channels with a latitude larger than
            the specified minimum.
        :type maxlatitude: float
        :param maxlatitude: Only include channels with a latitude smaller than
            the specified maximum.
        :type minlongitude: float
        :param minlongitude: Only include channels with a longitude larger than
            the specified minimum.
        :type maxlongitude: float
        :param maxlongitude: Only include channels with a longitude smaller
            than the specified maximum.
        :type latitude: float
        :param latitude: Specify the latitude to be used for a radius
            selection.
        :type longitude: float
        :param longitude: Specify the longitude to be used for a radius
            selection.
        :type minradius: float
        :param minradius: Only include channels within the specified
            minimum number of degrees from the geographic point defined by the
            latitude and longitude parameters.
        :type maxradius: float
        :param maxradius: Only include channels within the specified
            maximum number of degrees from the geographic point defined by the
            latitude and longitude parameters.
        """
        channels = []
        for cha in self.channels:
            # skip if any given criterion is not matched
            if location is not None:
                if not fnmatch.fnmatch(cha.location_code.upper(),
                                       location.upper()):
                    continue
            if channel is not None:
                if not fnmatch.fnmatch(cha.code.upper(), channel.upper()):
                    continue
            if sampling_rate is not None:
                if cha.sample_rate is None:
                    msg = ("Omitting channel that has no sampling rate "
                           "specified.")
                    warnings.warn(msg)
                    continue
                if not np.allclose(float(sampling_rate),
                                   cha.sample_rate,
                                   rtol=1E-5,
                                   atol=1E-8):
                    continue
            if any([t is not None for t in (time, starttime, endtime)]):
                if not cha.is_active(
                        time=time, starttime=starttime, endtime=endtime):
                    continue
            geo_filters = dict(minlatitude=minlatitude,
                               maxlatitude=maxlatitude,
                               minlongitude=minlongitude,
                               maxlongitude=maxlongitude,
                               latitude=latitude,
                               longitude=longitude,
                               minradius=minradius,
                               maxradius=maxradius)
            if any(value is not None for value in geo_filters.values()):
                if not inside_geobounds(cha, **geo_filters):
                    continue

            channels.append(cha)
        sta = copy.copy(self)
        sta.channels = channels
        return sta
Example #38
0
    def diff(self,
             a=None,
             b=None,
             cached=False,
             flags=GIT_DIFF_NORMAL,
             context_lines=3,
             interhunk_lines=0):
        """
        Show changes between the working tree and the index or a tree,
        changes between the index and a tree, changes between two trees, or
        changes between two blobs.

        Keyword arguments:

        cached
            use staged changes instead of workdir

        flags
            a GIT_DIFF_* constant

        context_lines
            the number of unchanged lines that define the boundary
            of a hunk (and to display before and after)

        interhunk_lines
            the maximum number of unchanged lines between hunk
            boundaries before the hunks will be merged into one

        Examples::

          # Changes in the working tree not yet staged for the next commit
          >>> diff()

          # Changes between the index and your last commit
          >>> diff(cached=True)

          # Changes in the working tree since your last commit
          >>> diff('HEAD')

          # Changes between commits
          >>> t0 = revparse_single('HEAD')
          >>> t1 = revparse_single('HEAD^')
          >>> diff(t0, t1)
          >>> diff('HEAD', 'HEAD^') # equivalent

        If you want to diff a tree against an empty tree, use the low level
        API (Tree.diff_to_tree()) directly.
        """
        def whatever_to_tree_or_blob(obj):
            if obj is None:
                return None

            # If it's a string, then it has to be valid revspec
            if is_string(obj):
                obj = self.revparse_single(obj)

            # First we try to get to a blob
            try:
                obj = obj.peel(Blob)
            except Exception:
                pass

            # And if that failed, try to get a tree, raising a type
            # error if that still doesn't work
            try:
                obj = obj.peel(Tree)
            except Exception:
                raise TypeError('unexpected "%s"' % type(obj))

            return obj

        a = whatever_to_tree_or_blob(a)
        b = whatever_to_tree_or_blob(b)

        opt_keys = ['flags', 'context_lines', 'interhunk_lines']
        opt_values = [flags, context_lines, interhunk_lines]

        # Case 1: Diff tree to tree
        if isinstance(a, Tree) and isinstance(b, Tree):
            return a.diff_to_tree(b, **dict(zip(opt_keys, opt_values)))

        # Case 2: Index to workdir
        elif a is None and b is None:
            return self.index.diff_to_workdir(*opt_values)

        # Case 3: Diff tree to index or workdir
        elif isinstance(a, Tree) and b is None:
            if cached:
                return a.diff_to_index(self.index, *opt_values)
            else:
                return a.diff_to_workdir(*opt_values)

        # Case 4: Diff blob to blob
        if isinstance(a, Blob) and isinstance(b, Blob):
            raise NotImplementedError('git_diff_blob_to_blob()')

        raise ValueError("Only blobs and treeish can be diffed")
Example #39
0
# Illustrates basic core components
import dash
import dash_core_components as dcc
import dash_html_components as html

app = dash.Dash(__name__)

# initiate core components
# . these are labels followed by the core component itself, assembled within a div

# .. dropdowns
dd_lbl = html.Label('Dropdown')
dd = dcc.Dropdown(
                options=[
                    dict(label='New York City', value='NYC'),
                    dict(label=u'Montréal', value='MTL'),
                    dict(label='San Francisco', value='SF')
                ],
                value='MTL'
            )
            

# .. multi dropdowns
mdd_lbl = html.Label('Multi-Dropdown')
mdd = dcc.Dropdown(
                        options=[
                            dict(label='New York City', value='NYC'),
                            dict(label=u'Montréal', value='MTL'),
                            dict(label='San Francisco', value='SF')
                        ],
                        value=['MTL', 'SF'],
Example #40
0
 def process_item(self, item, spider):
     self.sheet.insert(dict(item))
     return item
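# A hedged sketch of the surrounding pipeline: only process_item is shown
# above, so here self.sheet is assumed to be a `dataset` table; the class
# name and connection string are illustrative.
import dataset

class SaveItemsPipeline:
    def open_spider(self, spider):
        # dataset creates the table lazily on first insert
        self.db = dataset.connect('sqlite:///items.db')
        self.sheet = self.db['items']

    def process_item(self, item, spider):
        self.sheet.insert(dict(item))
        return item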
Example #41
0
    def make_side_view(self, axis_name):
        scene = getattr(self, 'scene_%s' % axis_name)
        scene.scene.parallel_projection = True
        ipw_3d = getattr(self, 'ipw_3d_%s' % axis_name)

        # We create the image_plane_widgets in the side view using a
        # VTK dataset pointing to the data on the corresponding
        # image_plane_widget in the 3D view (it is returned by
        # ipw_3d._get_reslice_output())
        side_src = ipw_3d.ipw._get_reslice_output()
        ipw = mlab.pipeline.image_plane_widget(
            side_src,
            plane_orientation='z_axes',
            vmin=self.data.min(),
            vmax=self.data.max(),
            figure=scene.mayavi_scene,
            name='Cut view %s' % axis_name,
        )
        setattr(self, 'ipw_%s' % axis_name, ipw)

        # Extract the spacing of the side_src to convert coordinates
        # into indices
        spacing = side_src.spacing

        # Make left-clicking create a crosshair
        ipw.ipw.left_button_action = 0

        x, y, z = self.position
        cursor = mlab.points3d(
            x,
            y,
            z,
            mode='axes',
            color=(0, 0, 0),
            scale_factor=2 * max(self.data.shape),
            figure=scene.mayavi_scene,
            name='Cursor view %s' % axis_name,
        )
        self.cursors[axis_name] = cursor

        # Add a callback on the image plane widget interaction to
        # move the others
        this_axis_number = self._axis_names[axis_name]

        def move_view(obj, evt):
            # Setting self.position below triggers update_position, which
            # temporarily disables rendering on all scenes
            position = list(obj.GetCurrentCursorPosition() * spacing)[:2]
            position.insert(this_axis_number, self.position[this_axis_number])
            # We need to special case y, as the view has been rotated.
            if axis_name == 'y':
                position = position[::-1]
            self.position = position

        ipw.ipw.add_observer('InteractionEvent', move_view)
        ipw.ipw.add_observer('StartInteractionEvent', move_view)

        # Center the image plane widget
        ipw.ipw.slice_position = 0.5 * self.data.shape[
            self._axis_names[axis_name]]

        # 2D interaction: only pan and zoom
        scene.scene.interactor.interactor_style = \
                                 tvtk.InteractorStyleImage()
        scene.scene.background = (0, 0, 0)

        # Some text:
        mlab.text(0.01, 0.8, axis_name, width=0.08)

        # Choose a view that makes sense
        views = dict(x=(0, 0), y=(90, 180), z=(0, 0))
        mlab.view(views[axis_name][0],
                  views[axis_name][1],
                  focalpoint=0.5 * np.array(self.data.shape),
                  figure=scene.mayavi_scene)
        scene.scene.camera.parallel_scale = 0.52 * np.mean(self.data.shape)
def main():
    parser = argparse.ArgumentParser(description='Redirect streaming events as email notifications.')

    parser.add_argument(
        'telemetryUrl',
        help='IP address or hostname of CVP or Telemetry',
    )
    parser.add_argument(
        'smtpServer',
        help='IP address or hostname of SMTP (email) server',
    )
    parser.add_argument(
        'sendToAddress',
        nargs='+',
        help='List of email recipients',
    )
    parser.add_argument(
        '-cc',
        '--sendCcAddress',
        nargs='+',
        help='List of CC email recipients',
    )
    parser.add_argument(
        '-s',
        '--subjectPrefix',
        default='[CloudVision Telemetry]',
        help='Text to prefix the Subject line',
    )
    parser.add_argument(
        '-p',
        '--port',
        type=int,
        default=465,
        help='destination port on SMTP server',
    )
    parser.add_argument(
        '--smtpUsername',
        help='SMTP (email) server username, if authentication is required. e.g.: [email protected]',
    )
    parser.add_argument(
        '--smtpPassword',
        help='''SMTP (email) server password, if authentication is required.
                If omitted you will be prompted for it at startup''',
    )
    parser.add_argument(
        '--noSmtpSsl',
        action='store_true',
        default=False,
        help='Flag to disable SSL SMTP connection',
    )
    parser.add_argument(
        '--noTelemetrySsl',
        action='store_true',
        default=False,
        help='Flag to disable SSL websocket connection',
    )
    parser.add_argument(
        '--telemetryUsername',
        help='Telemetry username, if authentication is required',
    )
    parser.add_argument(
        '--telemetryPassword',
        help='''Telemetry password, if authentication is required.
                If omitted you will be prompted for it at startup''',
    )
    parser.add_argument(
        '--noSslValidation',
        action='store_true',
        default=False,
        help='Disable validation of SSL certificates (inadvisable; potentially dangerous)',
    )
    parser.add_argument(
        '--verbose',
        action='store_true',
        default=False,
        help='Display additional info messages'
    )

    cmd_args = parser.parse_args()

    passwords = dict()

    if cmd_args.smtpPassword:
        passwords['smtpPassword'] = cmd_args.smtpPassword
    elif cmd_args.smtpUsername:
        passwords['smtpPassword'] = get_password('Enter SMTP server password for {}'.format(cmd_args.smtpUsername))

    if cmd_args.telemetryPassword:
        passwords['telemetryPassword'] = cmd_args.telemetryPassword
    elif not cmd_args.noTelemetrySsl:
        passwords['telemetryPassword'] = get_password('Enter Telemetry password for {}'.format(cmd_args.telemetryUrl))

    logging_level = logging.DEBUG if cmd_args.verbose else logging.WARNING
    logging.basicConfig(level=logging_level)

    connection = TelemetryWs(cmd_args, passwords)

    try:
        ssl_options = None
        if cmd_args.noSslValidation:
            ssl_options = {
                'check_hostname': False,
                'cert_reqs': ssl.CERT_NONE,
            }

        connection.socket.run_forever(sslopt=ssl_options)
    except KeyboardInterrupt:
        connection.socket.close()
        exit()
    def assign(self,
               approxs,
               squares,
               approxs_per_octave,
               gt_bboxes,
               gt_bboxes_ignore=None,
               gt_labels=None,
               iou_calculator=dict(type='BboxOverlaps2D')):
        """Assign gt to approxs.

        This method assigns a gt bbox to each group of approxs (bboxes).
        Each group of approxs is represented by a base approx (bbox) and
        will be assigned -1, 0, or a positive number.
        -1 means don't care, 0 means negative sample, and a
        positive number is the index (1-based) of the assigned gt.
        The assignment is done in the following steps; the order matters.

        1. assign every bbox to -1
        2. use the max IoU of each group of approxs to assign
        3. assign proposals whose iou with all gts < neg_iou_thr to 0
        4. for each bbox, if the iou with its nearest gt >= pos_iou_thr,
           assign it to that bbox
        5. for each gt bbox, assign its nearest proposals (may be more than
           one) to itself

        Args:
            approxs (Tensor): Bounding boxes to be assigned,
                shape (approxs_per_octave*n, 4).
            squares (Tensor): Base bounding boxes to be assigned,
                shape (n, 4).
            approxs_per_octave (int): number of approxs per octave
            gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
            gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
                labelled as `ignored`, e.g., crowd boxes in COCO.
            gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).

        Returns:
            :obj:`AssignResult`: The assign result.
        """

        if squares.shape[0] == 0 or gt_bboxes.shape[0] == 0:
            raise ValueError('No gt or approxs')
        num_squares = squares.size(0)
        num_gts = gt_bboxes.size(0)
        # re-organize anchors by approxs_per_octave x num_squares
        approxs = torch.transpose(
            approxs.view(num_squares, approxs_per_octave, 4), 0,
            1).contiguous().view(-1, 4)
        all_overlaps = self.iou_calculator(approxs, gt_bboxes)

        overlaps, _ = all_overlaps.view(approxs_per_octave, num_squares,
                                        num_gts).max(dim=0)
        overlaps = torch.transpose(overlaps, 0, 1)

        bboxes = squares[:, :4]

        if (self.ignore_iof_thr > 0) and (gt_bboxes_ignore is not None) and (
                gt_bboxes_ignore.numel() > 0):
            if self.ignore_wrt_candidates:
                ignore_overlaps = self.iou_calculator(
                    bboxes, gt_bboxes_ignore, mode='iof')
                ignore_max_overlaps, _ = ignore_overlaps.max(dim=1)
            else:
                ignore_overlaps = self.iou_calculator(
                    gt_bboxes_ignore, bboxes, mode='iof')
                ignore_max_overlaps, _ = ignore_overlaps.max(dim=0)
            overlaps[:, ignore_max_overlaps > self.ignore_iof_thr] = -1

        assign_result = self.assign_wrt_overlaps(overlaps, gt_labels)
        return assign_result
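# A small hedged illustration of the approx re-organization step above: rows
# are regrouped so that row i*num_squares + j holds the i-th approx of square
# j (the shapes here are made up for demonstration).
import torch

num_squares, approxs_per_octave = 3, 2
approxs = torch.arange(num_squares * approxs_per_octave * 4,
                       dtype=torch.float32).view(-1, 4)
regrouped = torch.transpose(
    approxs.view(num_squares, approxs_per_octave, 4), 0, 1).contiguous().view(-1, 4)
print(regrouped.shape)  # torch.Size([6, 4])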
Example #44
0
class VolumeSlicer(HasTraits):
    # The data to plot
    data = Array

    # The position of the view
    position = Array(shape=(3, ))

    # The 4 views displayed
    scene3d = Instance(MlabSceneModel, ())
    scene_x = Instance(MlabSceneModel, ())
    scene_y = Instance(MlabSceneModel, ())
    scene_z = Instance(MlabSceneModel, ())

    # The data source
    data_src = Instance(Source)

    # The image plane widgets of the 3D scene
    ipw_3d_x = Instance(PipelineBase)
    ipw_3d_y = Instance(PipelineBase)
    ipw_3d_z = Instance(PipelineBase)

    # The cursors on each view:
    cursors = Dict()

    disable_render = Bool

    _axis_names = dict(x=0, y=1, z=2)

    #---------------------------------------------------------------------------
    # Object interface
    #---------------------------------------------------------------------------
    def __init__(self, **traits):
        super(VolumeSlicer, self).__init__(**traits)
        # Force the creation of the image_plane_widgets:
        self.ipw_3d_x
        self.ipw_3d_y
        self.ipw_3d_z

    #---------------------------------------------------------------------------
    # Default values
    #---------------------------------------------------------------------------
    def _position_default(self):
        return 0.5 * np.array(self.data.shape)

    def _data_src_default(self):
        return mlab.pipeline.scalar_field(
            self.data,
            figure=self.scene3d.mayavi_scene,
            name='Data',
        )

    def make_ipw_3d(self, axis_name):
        ipw = mlab.pipeline.image_plane_widget(
            self.data_src,
            figure=self.scene3d.mayavi_scene,
            plane_orientation='%s_axes' % axis_name,
            name='Cut %s' % axis_name)
        return ipw

    def _ipw_3d_x_default(self):
        return self.make_ipw_3d('x')

    def _ipw_3d_y_default(self):
        return self.make_ipw_3d('y')

    def _ipw_3d_z_default(self):
        return self.make_ipw_3d('z')

    #---------------------------------------------------------------------------
    # Scene activation callbacks
    #---------------------------------------------------------------------------
    @on_trait_change('scene3d.activated')
    def display_scene3d(self):
        outline = mlab.pipeline.outline(
            self.data_src,
            figure=self.scene3d.mayavi_scene,
        )
        self.scene3d.mlab.view(40, 50)
        # Interaction properties can only be changed after the scene
        # has been created, and thus the interactor exists
        for ipw in (self.ipw_3d_x, self.ipw_3d_y, self.ipw_3d_z):
            ipw.ipw.interaction = 0
        self.scene3d.scene.background = (0, 0, 0)
        # Keep the view always pointing up
        self.scene3d.scene.interactor.interactor_style = \
                                 tvtk.InteractorStyleTerrain()
        self.update_position()

    def make_side_view(self, axis_name):
        scene = getattr(self, 'scene_%s' % axis_name)
        scene.scene.parallel_projection = True
        ipw_3d = getattr(self, 'ipw_3d_%s' % axis_name)

        # We create the image_plane_widgets in the side view using a
        # VTK dataset pointing to the data on the corresponding
        # image_plane_widget in the 3D view (it is returned by
        # ipw_3d._get_reslice_output())
        side_src = ipw_3d.ipw._get_reslice_output()
        ipw = mlab.pipeline.image_plane_widget(
            side_src,
            plane_orientation='z_axes',
            vmin=self.data.min(),
            vmax=self.data.max(),
            figure=scene.mayavi_scene,
            name='Cut view %s' % axis_name,
        )
        setattr(self, 'ipw_%s' % axis_name, ipw)

        # Extract the spacing of the side_src to convert coordinates
        # into indices
        spacing = side_src.spacing

        # Make left-clicking create a crosshair
        ipw.ipw.left_button_action = 0

        x, y, z = self.position
        cursor = mlab.points3d(
            x,
            y,
            z,
            mode='axes',
            color=(0, 0, 0),
            scale_factor=2 * max(self.data.shape),
            figure=scene.mayavi_scene,
            name='Cursor view %s' % axis_name,
        )
        self.cursors[axis_name] = cursor

        # Add a callback on the image plane widget interaction to
        # move the others
        this_axis_number = self._axis_names[axis_name]

        def move_view(obj, evt):
            # Setting self.position below triggers update_position, which
            # temporarily disables rendering on all scenes
            position = list(obj.GetCurrentCursorPosition() * spacing)[:2]
            position.insert(this_axis_number, self.position[this_axis_number])
            # We need to special case y, as the view has been rotated.
            if axis_name == 'y':
                position = position[::-1]
            self.position = position

        ipw.ipw.add_observer('InteractionEvent', move_view)
        ipw.ipw.add_observer('StartInteractionEvent', move_view)

        # Center the image plane widget
        ipw.ipw.slice_position = 0.5 * self.data.shape[
            self._axis_names[axis_name]]

        # 2D interaction: only pan and zoom
        scene.scene.interactor.interactor_style = \
                                 tvtk.InteractorStyleImage()
        scene.scene.background = (0, 0, 0)

        # Some text:
        mlab.text(0.01, 0.8, axis_name, width=0.08)

        # Choose a view that makes sense
        views = dict(x=(0, 0), y=(90, 180), z=(0, 0))
        mlab.view(views[axis_name][0],
                  views[axis_name][1],
                  focalpoint=0.5 * np.array(self.data.shape),
                  figure=scene.mayavi_scene)
        scene.scene.camera.parallel_scale = 0.52 * np.mean(self.data.shape)

    @on_trait_change('scene_x.activated')
    def display_scene_x(self):
        return self.make_side_view('x')

    @on_trait_change('scene_y.activated')
    def display_scene_y(self):
        return self.make_side_view('y')

    @on_trait_change('scene_z.activated')
    def display_scene_z(self):
        return self.make_side_view('z')

    #---------------------------------------------------------------------------
    # Traits callback
    #---------------------------------------------------------------------------
    @on_trait_change('position')
    def update_position(self):
        """ Update the position of the cursors on each side view, as well
            as the image_plane_widgets in the 3D view.
        """
        # First disable rendering in all scenes to avoid unnecessary
        # renderings
        self.disable_render = True

        # For each axis, move image_plane_widget and the cursor in the
        # side view
        for axis_name, axis_number in self._axis_names.items():
            ipw3d = getattr(self, 'ipw_3d_%s' % axis_name)
            ipw3d.ipw.slice_position = self.position[axis_number]

            # Go from the 3D position, to the 2D coordinates in the
            # side view
            position2d = list(self.position)
            position2d.pop(axis_number)
            if axis_name == 'y':
                position2d = position2d[::-1]
            # Move the cursor
            # For the following to work, you need Mayavi 3.4.0, if you
            # have a less recent version, use 'x=[position2d[0]]'
            self.cursors[axis_name].mlab_source.set(x=position2d[0],
                                                    y=position2d[1],
                                                    z=0)

        # Finally re-enable rendering
        self.disable_render = False

    @on_trait_change('disable_render')
    def _render_enable(self):
        for scene in (self.scene3d, self.scene_x, self.scene_y, self.scene_z):
            scene.scene.disable_render = self.disable_render

    #---------------------------------------------------------------------------
    # The layout of the dialog created
    #---------------------------------------------------------------------------
    view = View(
        HGroup(
            Group(
                Item('scene_y',
                     editor=SceneEditor(scene_class=Scene),
                     height=250,
                     width=300),
                Item('scene_z',
                     editor=SceneEditor(scene_class=Scene),
                     height=250,
                     width=300),
                show_labels=False,
            ),
            Group(
                Item('scene_x',
                     editor=SceneEditor(scene_class=Scene),
                     height=250,
                     width=300),
                Item('scene3d',
                     editor=SceneEditor(scene_class=Scene),
                     height=250,
                     width=300),
                show_labels=False,
            ),
        ),
        resizable=True,
        title='Volume Slicer',
    )
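# A hedged usage sketch for the class above, mirroring the usual Mayavi volume
# slicer demo (assumes mayavi and numpy are available).
import numpy as np

x, y, z = np.ogrid[-5:5:64j, -5:5:64j, -5:5:64j]
data = np.sin(3 * x) / x + 0.05 * z ** 2 + np.cos(3 * y)

m = VolumeSlicer(data=data)
m.configure_traits()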
Example #45
0
# Test the eval() function

s = "print('abcde')"
eval(s)

a = 10
b = 20
c = eval("a+b")
print(c)

dict1 = dict(a=10, b=20)
d = eval("a+b", dict1)
print(d)
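# A small hedged extension of the snippet above: eval() also accepts separate
# globals and locals mappings (the names glb and loc are illustrative).
glb = dict(a=1)
loc = dict(b=2)
print(eval("a + b", glb, loc))  # 3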
Example #46
0
def readCommand(argv):
    """
    Processes the command used to run pacman from the command line.
    """
    from optparse import OptionParser
    usageStr = """
    USAGE:      python pacman.py <options>
    EXAMPLES:   (1) python pacman.py
                    - starts an interactive game
                (2) python pacman.py --layout smallClassic --zoom 2
                OR  python pacman.py -l smallClassic -z 2
                    - starts an interactive game on a smaller board, zoomed in
    """
    parser = OptionParser(usageStr)

    parser.add_option('-n',
                      '--numGames',
                      dest='numGames',
                      type='int',
                      help=default('the number of GAMES to play'),
                      metavar='GAMES',
                      default=1)
    parser.add_option(
        '-l',
        '--layout',
        dest='layout',
        help=default('the LAYOUT_FILE from which to load the map layout'),
        metavar='LAYOUT_FILE',
        default='mediumClassic')
    parser.add_option(
        '-p',
        '--pacman',
        dest='pacman',
        help=default('the agent TYPE in the pacmanAgents module to use'),
        metavar='TYPE',
        default='KeyboardAgent')
    parser.add_option('-t',
                      '--textGraphics',
                      action='store_true',
                      dest='textGraphics',
                      help='Display output as text only',
                      default=False)
    parser.add_option('-q',
                      '--quietTextGraphics',
                      action='store_true',
                      dest='quietGraphics',
                      help='Generate minimal output and no graphics',
                      default=False)
    parser.add_option(
        '-g',
        '--ghosts',
        dest='ghost',
        help=default('the ghost agent TYPE in the ghostAgents module to use'),
        metavar='TYPE',
        default='RandomGhost')
    parser.add_option('-k',
                      '--numghosts',
                      type='int',
                      dest='numGhosts',
                      help=default('The maximum number of ghosts to use'),
                      default=4)
    parser.add_option('-z',
                      '--zoom',
                      type='float',
                      dest='zoom',
                      help=default('Zoom the size of the graphics window'),
                      default=1.0)
    parser.add_option(
        '-f',
        '--fixRandomSeed',
        action='store_true',
        dest='fixRandomSeed',
        help='Fixes the random seed to always play the same game',
        default=False)
    parser.add_option(
        '-r',
        '--recordActions',
        action='store_true',
        dest='record',
        help=
        'Writes game histories to a file (named by the time they were played)',
        default=False)
    parser.add_option('--replay',
                      dest='gameToReplay',
                      help='A recorded game file (pickle) to replay',
                      default=None)
    parser.add_option(
        '-a',
        '--agentArgs',
        dest='agentArgs',
        help=
        'Comma separated values sent to agent. e.g. "opt1=val1,opt2,opt3=val3"'
    )
    parser.add_option(
        '-x',
        '--numTraining',
        dest='numTraining',
        type='int',
        help=default('How many episodes are training (suppresses output)'),
        default=0)
    parser.add_option(
        '--frameTime',
        dest='frameTime',
        type='float',
        help=default('Time to delay between frames; <0 means keyboard'),
        default=0.1)
    parser.add_option(
        '-c',
        '--catchExceptions',
        action='store_true',
        dest='catchExceptions',
        help='Turns on exception handling and timeouts during games',
        default=False)
    parser.add_option(
        '--timeout',
        dest='timeout',
        type='int',
        help=default(
            'Maximum length of time an agent can spend computing in a single game'
        ),
        default=30)

    options, otherjunk = parser.parse_args(argv)
    if len(otherjunk) != 0:
        raise Exception('Command line input not understood: ' + str(otherjunk))
    args = dict()

    # Fix the random seed
    if options.fixRandomSeed: random.seed('cs188')

    # Choose a layout
    args['layout'] = layout.getLayout(options.layout)
    if args['layout'] == None:
        raise Exception("The layout " + options.layout + " cannot be found")

    # Choose a Pacman agent
    noKeyboard = options.gameToReplay == None and (options.textGraphics
                                                   or options.quietGraphics)
    pacmanType = loadAgent(options.pacman, noKeyboard)
    agentOpts = parseAgentArgs(options.agentArgs)
    if options.numTraining > 0:
        args['numTraining'] = options.numTraining
        if 'numTraining' not in agentOpts:
            agentOpts['numTraining'] = options.numTraining
    pacman = pacmanType(**agentOpts)  # Instantiate Pacman with agentArgs
    args['pacman'] = pacman

    # Don't display training games
    if 'numTrain' in agentOpts:
        options.numQuiet = int(agentOpts['numTrain'])
        options.numIgnore = int(agentOpts['numTrain'])

    # Choose a ghost agent
    ghostType = loadAgent(options.ghost, noKeyboard)
    args['ghosts'] = [ghostType(i + 1) for i in range(options.numGhosts)]

    # Choose a display format
    if options.quietGraphics:
        import textDisplay
        args['display'] = textDisplay.NullGraphics()
    elif options.textGraphics:
        import textDisplay
        textDisplay.SLEEP_TIME = options.frameTime
        args['display'] = textDisplay.PacmanGraphics()
    else:
        import graphicsDisplay
        args['display'] = graphicsDisplay.PacmanGraphics(
            options.zoom, frameTime=options.frameTime)
    args['numGames'] = options.numGames
    args['record'] = options.record
    args['catchExceptions'] = options.catchExceptions
    args['timeout'] = options.timeout

    # Special case: recorded games don't use the runGames method or args structure
    if options.gameToReplay != None:
        print('Replaying recorded game %s.' % options.gameToReplay)
        import pickle
        f = open(options.gameToReplay, 'rb')
        try:
            recorded = pickle.load(f)
        finally:
            f.close()
        recorded['display'] = args['display']
        replayGame(**recorded)
        sys.exit(0)

    return args
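# A hedged usage sketch, mirroring the USAGE string in the docstring above
# (assumes the surrounding pacman package so that layout and agent loading
# succeed).
if __name__ == '__main__':
    args = readCommand(['-l', 'smallClassic', '-z', '2', '-q'])
    # runGames(**args)  # how the parsed args are typically consumed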
Example #47
0
 def handle_starttag(self, tag, attrs):
     href = dict(attrs).get("href")
     if href and tag == "a":
         self.urls.append(href)
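# A minimal hedged sketch of the parser the method above is assumed to belong
# to; the class name LinkCollector is hypothetical.
from html.parser import HTMLParser

class LinkCollector(HTMLParser):
    def __init__(self):
        super().__init__()
        self.urls = []

    def handle_starttag(self, tag, attrs):
        href = dict(attrs).get("href")
        if href and tag == "a":
            self.urls.append(href)

p = LinkCollector()
p.feed('<p><a href="https://example.com">link</a></p>')
print(p.urls)  # ['https://example.com']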
 variant = dict(
     algo_kwargs=dict(
         num_epochs=505,
         num_steps_per_epoch=1000,
         num_steps_per_eval=1000,
         tau=1e-2,
         batch_size=128,
         max_path_length=100,
         discount=0.99,
         # qf_learning_rate=1e-3,
         # policy_learning_rate=1e-4,
     ),
     env_kwargs=dict(
         include_puck=True,
         arm_range=0.1,
         use_big_red_puck=True,
         reward_params=dict(
             type="sparse",
             epsilon=0.2,
             # puck_reward_only=True,
         ),
     ),
     replay_kwargs=dict(
         fraction_goals_are_rollout_goals=0.2,
         fraction_goals_are_env_goals=0.5,
     ),
     algorithm='TD3',
     normalize=False,
     rdim=4,
     render=False,
     env=FullPusher2DEnv,
     use_env_goals=True,
     vae_paths=vae_paths,
     wrap_mujoco_env=True,
     do_state_based_exp=False,
     exploration_noise=0.1,
 )
Example #49
0
#!/usr/bin/python3

"""
   Read a FASTQ file and remove redundancy
   For example when the base calling has some bugs
   I hope you have enough RAM
"""

import sys

fastq = dict()

linenum = 0
for i in open(sys.argv[1]):
    if linenum % 4 == 0:
        readid = i.rstrip()
        fastq[readid] = list()
    fastq[readid].append(i.rstrip())
    linenum += 1

for i in fastq:
    print("\n".join(fastq[i]))
def fill_experiment_table():
    """Populates the table that displays the results from previously ran
    experiments. The results are stored as yaml files in the directory
    that the global variable EXPERIMENT_SERIALIZATION_DIR points to.
    """
    global data_table, table_source, table_data_df

    os.chdir(EXPERIMENT_SERIALIZATION_DIR)
    experiment_files = glob.glob("*.yml")

    names = [''] * len(experiment_files)
    Ls = [0] * len(experiment_files)
    reward_types = [''] * len(experiment_files)
    window_size = [''] * len(experiment_files)
    window_step = [''] * len(experiment_files)
    seq_or_con = [''] * len(experiment_files)

    for i, filename in enumerate(experiment_files):
        with open(filename, 'r') as ymlfile:
            experiment_data = yaml.safe_load(ymlfile)

            names[i] = filename
            Ls[i] = experiment_data['L']
            reward_path = experiment_data['reward_path']
            if 'threshold' in reward_path:
                reward_types[i] = 'threshold' + reward_path.split('threshold_')[
                    1][0:3]
            elif 'top' in experiment_data['reward_path']:
                reward_types[i] = 'top'
            else:
                if 'pear' in reward_path:
                    reward_types[i] = 'continous-pearson'
                elif 'MI_n1' in reward_path:
                    reward_types[i] = 'continous-MI-normalized'
                else:
                    reward_types[i] = 'continous-MI'

            window_size[i], window_step[i] = re.findall(
                r'\d+', reward_path)[0:2]

            seq_or_con[i] = reward_path.split('/')[-1][0:3]

    table_data = dict(
        names=names,
        seq_or_con=seq_or_con,
        Ls=Ls,
        reward_types=reward_types,
        window_size=window_size,
        window_step=window_step,
    )

    table_data_df = pd.DataFrame(data=table_data)
    table_data_df = table_data_df.sort_values(
        ['seq_or_con', 'Ls', 'window_size', 'window_step', 'reward_types'])

    table_source = ColumnDataSource(data=table_data_df)
    table_source.selected.on_change('indices', load_experiment)

    columns = [
        TableColumn(field="names", title="Experiment"),
        TableColumn(field="seq_or_con", title="Sequential/ concurrent"),
        TableColumn(field="Ls", title="L"),
        TableColumn(field="reward_types", title="Reward Type"),
        TableColumn(field='window_size', title="Window Size"),
        TableColumn(field='window_step', title="Window Step"),
    ]
    data_table = DataTable(source=table_source, columns=columns, height=200)
    data_table.sizing_mode = 'stretch_width'

    global pol_table, pol_source

    columns_pol_table = [
        TableColumn(field="pol_name", title="Name"),
        TableColumn(field="overall_regret", title="Overall regret")
    ]

    pol_source = ColumnDataSource(dict(pol_name=[], overall_regret=[]))
    pol_source.selected.on_change('indices', plot_policy)
    pol_table = DataTable(
        source=pol_source, columns=columns_pol_table, height=300)
    pol_table.sizing_mode = 'stretch_width'
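# A hedged illustration of the minimal keys each experiment .yml file is
# assumed to carry for the table above; the reward_path layout is hypothetical
# but matches how it is parsed (first two integers -> window size/step,
# basename prefix -> 'seq'/'con', substring -> reward type).
example_experiment = dict(
    L=10,
    reward_path='rewards/pearson_window_50_step_10/seq_rewards.npy',
)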
Example #51
0
def print_row(title_, nutrients):
    rowformat = "{title:11} {calories:7} {carbs:6} {fat:6} {protein:6}"
    row = dict(title=title_)
    for nutrient in NUTRIENTS:
        row[nutrient] = decorated_nutrient(nutrients[nutrient])
    print(rowformat.format(**row))
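# A hedged sketch of the helpers print_row() relies on; NUTRIENTS and
# decorated_nutrient are not shown in the snippet, so both are illustrative.
NUTRIENTS = ('calories', 'carbs', 'fat', 'protein')

def decorated_nutrient(value):
    return '{:>6.1f}'.format(value)

print_row('Oatmeal', dict(calories=150, carbs=27, fat=3, protein=5))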
Example #52
0
import sys

import numpy as np
import xarray

data = np.zeros((1392,1080))
data[-1,:] = 1.e-5
field = xarray.Dataset({'rate': (['ny', 'nx'], data)})

# Choose where to save the results from this script
output_file = 'INPUT/ice_outflow.nc'

# Save file, making sure that time is the unlimited dimension.
compress = True
if compress:
   fileformat, zlib, complevel, area_dtype = 'NETCDF4', True, 1, 'f4'
else:
   fileformat, zlib, complevel, area_dtype = 'NETCDF3_64BIT_OFFSET', None, None, 'd'

comp = dict(zlib=zlib, complevel=complevel)
encoding = {var: comp for var in field.data_vars}
field.to_netcdf(
    output_file,
    format=fileformat,
    encoding=encoding
)

# Delete the _FillValue attribute.
# MOM will crash if this attribute is present.
# I can't find how to get xarray to not write it.
# Need to have NCO in your path or module load nco before running this script.
# Get rid of all of them:
#call(f'ncatted -h -O -a _FillValue,,d,, {output_file}', shell=True)
Example #53
0
import monochromator
import srs510
import serial
import time
import numpy

configuration_text = open('grating_test.config.txt', 'r').read().split('\n')
parameters = dict()
for line in configuration_text:
    if (len(line) > 0):
        if line[0] != '#':
            l = line.split()
            try:
                parameters[l[0].upper()] = float(l[2])
                print('Added float', parameters[l[0].upper()],
                      'as entry for key', l[0].upper())
            except:
                parameters[l[0].upper()] = l[2]
                print('Added text variable', parameters[l[0].upper()],
                      'as entry for key', l[0].upper())

print "Grating Testing Program"
print "Cross your fingers!"

sim = False
srs = srs510.SRS510(sim, parameters)

a = srs.measure_const_SNR(10)
'''
mono = monochromator.Monochromator(False, parameters)
ax.set_xlim(0.01, 0.99)
ax.set_ylim(0, 2.0)

#----------------------------------------
# plot the convolution
ax = fig.add_subplot(223)
ax.plot(x_w, y_w, '-k')

ax.text(0.5,
        0.95,
        "Convolution:\n" + r"$[D \ast W](x)$",
        ha='center',
        va='top',
        transform=ax.transAxes,
        bbox=dict(fc='w', ec='k', pad=8),
        zorder=2)

ax.text(0.5,
        0.05, (r'$[D \ast W](x)$' +
               r'$= \mathcal{F}^{-1}\{\mathcal{F}[D] \cdot \mathcal{F}[W]\}$'),
        ha='center',
        va='bottom',
        transform=ax.transAxes)

for x_loc in (0.2, 0.8):
    y_loc = y_w[x_w <= x_loc][-1]
    ax.annotate('', (x_loc, y_loc), (x_loc, 2.0),
                zorder=1,
                arrowprops=dict(arrowstyle='->', color='gray', lw=2))
Example #55
0
    def _revise_resource_value(self, instance, files: list, user, action_type: str, importer_session_opts: typing.Optional[typing.Dict] = None):
        from geonode.upload.files import ALLOWED_EXTENSIONS
        session_opts = dict(importer_session_opts) if importer_session_opts is not None else {}

        spatial_files_type = get_spatial_files_dataset_type(ALLOWED_EXTENSIONS, files)

        if not spatial_files_type:
            raise Exception("No suitable Spatial Files avaialable for 'ALLOWED_EXTENSIONS' = {ALLOWED_EXTENSIONS}.")

        upload_session, _ = Upload.objects.get_or_create(resource=instance.get_real_instance().resourcebase_ptr, user=user)
        upload_session.resource = instance.get_real_instance().resourcebase_ptr
        upload_session.save()

        _name = instance.get_real_instance().name
        if not _name:
            _name = session_opts.get('name', None) or os.path.splitext(os.path.basename(spatial_files_type.base_file))[0]
        instance.get_real_instance().name = _name

        gs_dataset = None
        try:
            gs_dataset = gs_catalog.get_layer(_name)
        except Exception as e:
            logger.debug(e)

        _workspace = None
        _target_store = None
        if gs_dataset:
            _target_store = gs_dataset.resource.store.name if instance.get_real_instance().subtype == 'vector' else None
            _workspace = gs_dataset.resource.workspace.name if gs_dataset.resource.workspace else None

        if not _workspace:
            _workspace = session_opts.get('workspace', instance.get_real_instance().workspace)
            if not _workspace:
                _workspace = instance.get_real_instance().workspace or settings.DEFAULT_WORKSPACE

        if not _target_store:
            if instance.get_real_instance().subtype == 'vector' or spatial_files_type.dataset_type == 'vector':
                _dsname = ogc_server_settings.datastore_db['NAME']
                _ds = create_geoserver_db_featurestore(store_name=_dsname, workspace=_workspace)
                if _ds:
                    _target_store = session_opts.get('target_store', None) or _dsname

        #  opening Import session for the selected layer
        import_session = gs_uploader.start_import(
            import_id=upload_session.id,
            name=_name,
            target_store=_target_store
        )

        upload_session.set_processing_state(enumerations.STATE_PROCESSED)
        upload_session.import_id = import_session.id
        upload_session.name = _name
        upload_session.complete = True
        upload_session.processed = True
        upload_session.save()

        _gs_import_session_info = GeoServerImporterSessionInfo(
            upload_session=upload_session,
            import_session=import_session,
            spatial_files_type=spatial_files_type,
            dataset_name=None,
            workspace=_workspace,
            target_store=_target_store
        )

        _local_files = []
        _temporary_files = []
        try:
            for _f in files:
                if os.path.exists(_f) and os.path.isfile(_f):
                    _local_files.append(os.path.abspath(_f))
                    try:
                        os.close(_f)
                    except Exception:
                        pass
                else:
                    _suffix = os.path.splitext(os.path.basename(_f))[1] if len(os.path.splitext(os.path.basename(_f))) else None
                    with tempfile.NamedTemporaryFile(mode="wb+", delete=False, dir=settings.MEDIA_ROOT, suffix=_suffix) as _tmp_file:
                        _tmp_file.write(storage_manager.open(_f, 'rb+').read())
                        _tmp_file.seek(0)
                        _tmp_file_name = f'{_tmp_file.name}'
                        _local_files.append(os.path.abspath(_tmp_file_name))
                        _temporary_files.append(os.path.abspath(_tmp_file_name))
                    try:
                        storage_manager.close(_f)
                    except Exception:
                        pass
        except Exception as e:
            logger.exception(e)

        if _local_files:
            try:
                import_session.upload_task(_local_files)
                task = import_session.tasks[0]
                #  Changing layer name, mode and target
                task.layer.set_target_layer_name(_name)
                task.set_update_mode(action_type.upper())
                task.set_target(
                    store_name=_target_store,
                    workspace=_workspace
                )
                transforms = session_opts.get('transforms', None)
                if transforms:
                    task.set_transforms(transforms)
                #  Starting import process
                import_session.commit()
                import_session = import_session.reload()

                try:
                    # Updating Resource with the files replaced
                    if action_type.lower() == 'replace':
                        updated_files_list = storage_manager.replace(instance, files)
                        # Using update instead of save in order to avoid calling
                        # side-effect function of the resource
                        r = ResourceBase.objects.filter(id=instance.id)
                        r.update(**updated_files_list)
                    else:
                        instance.files = files
                except Exception as e:
                    logger.exception(e)

                _gs_import_session_info.import_session = import_session
                _gs_import_session_info.dataset_name = import_session.tasks[0].layer.name
            finally:
                for _f in _temporary_files:
                    try:
                        os.remove(_f)
                    except Exception as e:
                        logger.debug(e)

        return _gs_import_session_info
Example #56
0
def shape_element(element, node_attr_fields=NODE_FIELDS, way_attr_fields=WAY_FIELDS,
				  relation_attr_fields=RELATION_FIELDS,
				  problem_chars=PROBLEMCHARS, default_tag_type='regular'):
	"""Clean and shape node or way XML element to Python dict"""

	node_attribs     = {}
	way_attribs      = {}
	relation_attribs = {}
	tags             = []  # Handle secondary tags the same way for node, way and relation elements
	way_nodes        = []
	relation_members = []

	nd_position = 0
	member_position = 0

	for child in element :
		#========
		# TAGS  ||
		#========
		if child.tag == 'tag':
			key = child.attrib['k'] if 'k' in child.attrib else None
			if key and problem_chars.search(key):
				key = None
			if key and LOWER_COLON.search(key):
				parts = key.split(':')
				type_ = parts[0]
				key   = ':'.join(parts[1:])
			else:
				type_ = default_tag_type
			tags.append({
				'value': child.attrib['v'] if 'v' in child.attrib else None,
				'id':    element.attrib['id'] if 'id' in element.attrib else None,
				'key':   key,
				'type':  type_,
			})

	 	#=============
		# way_nodes  ||
		#=============
		elif child.tag == 'nd':
			way_nodes.append({
				'id':       element.attrib['id'] if 'id' in element.attrib else None,
				'node_id':  child.attrib['ref'] if 'ref' in child.attrib else None,
				'position': nd_position,
			})
			nd_position += 1

		#====================
		# relation_members  ||
		#====================
		elif child.tag == 'member':
			relation_members.append({
				'id':          element.attrib['id'] if 'id' in element.attrib else None,
				'node_way_id': child.attrib['ref'] if 'ref' in child.attrib else None,
				'role':        child.attrib['role'] if 'role' in child.attrib else None,
				'type':        child.attrib['type'] if 'type' in child.attrib else None,
				'position':    member_position,
			})
			member_position += 1

	#==================
	#=========
	# nodes  ||
	#=========
	if element.tag == 'node':
		for field in node_attr_fields :
			node_attribs[ field ] = element.attrib[ field ] if field in element.attrib else None

		return { 'node': node_attribs, 'node_tags': tags }

	#========
	# WAYS  ||
	#========
	elif element.tag == 'way':
		for field in way_attr_fields :
			way_attribs[ field ] = element.attrib[ field ] if field in element.attrib else None

		return { 'way': way_attribs, 'way_nodes': way_nodes, 'way_tags': tags }
	#=============
	# RELATIONS  ||
	#=============
	elif element.tag == 'relation':
		for field in relation_attr_fields :
			relation_attribs[ field ] = element.attrib[ field ] if field in element.attrib else None

		return { 'relation': relation_attribs, 'relation_members': relation_members, 'relation_tags': tags }
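
# Hypothetical usage sketch (not part of the original snippet): stream an OSM
# file with ElementTree and shape each top-level element. The file name and this
# helper are illustrative assumptions; NODE_FIELDS, WAY_FIELDS, RELATION_FIELDS,
# PROBLEMCHARS and LOWER_COLON are expected to be defined as in the original module.
import xml.etree.ElementTree as ET

def iter_shaped_elements(osm_path='sample.osm'):
    for _, elem in ET.iterparse(osm_path, events=('end',)):
        if elem.tag in ('node', 'way', 'relation'):
            shaped = shape_element(elem)
            if shaped:
                yield shaped
        elem.clear()  # free memory while streaming large files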
Example #57
0
def raster_drill(layer, x, y, format_):
    """
    Writes the information in the format provided by the user
    and reads some information from the geomet-climate yaml

    :param layer: layer name
    :param x: x coordinate
    :param y: y coordinate
    :param format_: output format (GeoJSON or CSV)

    :return: return the final file fo a given location
    """

    from msc_pygeoapi.process.cccs import (GEOMET_CLIMATE_CONFIG,
                                           GEOMET_CLIMATE_BASEPATH,
                                           GEOMET_CLIMATE_BASEPATH_VRT,
                                           GEOMET_CLIMATE_EPSG)

    if GEOMET_CLIMATE_EPSG is not None:
        pyproj.set_datapath(GEOMET_CLIMATE_EPSG)
    else:
        raise Exception("Could not locate geomet-climate EPSG file.")

    LOGGER.info('start raster drilling')

    if format_ not in ['CSV', 'GeoJSON']:
        msg = 'Invalid format'
        LOGGER.error(msg)
        raise ValueError(msg)

    with open(GEOMET_CLIMATE_CONFIG, encoding='utf-8') as fh:
        cfg = yaml.load(fh, Loader=CLoader)

    data_basepath = GEOMET_CLIMATE_BASEPATH
    climate_model_path = cfg['layers'][layer]['climate_model']['basepath']
    file_path = cfg['layers'][layer]['filepath']
    inter_path = os.path.join(climate_model_path, file_path)

    if ('ABS' in layer
            or ('ANO' in layer and not layer.startswith('CANGRD'))):

        keys = [
            'Model', 'Variable', 'Scenario', 'Period', 'Type', 'Percentile'
        ]
        values = layer.replace('_', '.').split('.')
        layer_keys = dict(zip(keys, values))

        file_name = cfg['layers'][layer]['filename']

    elif 'TREND' not in layer and layer.startswith('CANGRD'):
        keys = ['Model', 'Type', 'Variable', 'Period']
        values = layer.replace('_', '.').split('.')
        layer_keys = dict(zip(keys, values))

        data_basepath = GEOMET_CLIMATE_BASEPATH_VRT
        climate_model_path = cfg['layers'][layer]['climate_model']['basepath']
        file_path = cfg['layers'][layer]['filepath']
        inter_path = os.path.join(climate_model_path, file_path)

        file_name = '{}.vrt'.format(cfg['layers'][layer]['filename'])

    elif layer.startswith('SPEI'):
        keys = ['Variable', 'Variation', 'Scenario', 'Period', 'Percentile']
        values = layer.replace('-', '.').replace('_', '.').split('.')
        layer_keys = dict(zip(keys, values))
        layer_keys['Type'] = 'ABS'

        file_name = cfg['layers'][layer]['filename']

    elif layer.startswith('INDICES'):
        keys = ['Model', 'Variable', 'Scenario', 'Percentile']
        values = layer.replace('_', '.').split('.')
        layer_keys = dict(zip(keys, values))
        layer_keys['Type'] = 'ABS'

        file_name = cfg['layers'][layer]['filename']

    else:
        msg = 'Not a valid or time enabled layer: {}'.format(layer)
        LOGGER.error(msg)
        raise ValueError(msg)

    srs = osr.SpatialReference()
    srs.ImportFromWkt(cfg['layers'][layer]['climate_model']['projection'])
    inProj = Proj(init='epsg:4326')
    outProj = Proj(srs.ExportToProj4())
    _x, _y = transform(inProj, outProj, x, y)

    ds = os.path.join(data_basepath, inter_path, file_name)

    data = get_location_info(ds, _x, _y, cfg['layers'][layer], layer_keys)
    output = serialize(data, cfg['layers'][layer], format_, x, y)

    return output
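
# Hypothetical usage sketch (not part of the original module). The layer name is
# an illustrative assumption; real identifiers come from the geomet-climate
# configuration, and the call only works where that configuration and the
# underlying rasters are available.
#output = raster_drill('CANGRD.ANO.TM_ANNUAL', -75.7, 45.4, 'GeoJSON')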
Example #58
0
from __future__ import division, print_function, absolute_import

import numpy as np
from random import shuffle
from math import ceil
import pickle
import subprocess

from .helpers import loadAudio
from .conf import conf
from config import ROOT_DIR


library = dict()

# Shuffle two arrays in unison
def unison_shuffled_copies(a, b):
    assert len(a) == len(b)
    p = np.random.permutation(len(a))
    return a[p], b[p]
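
# Illustrative sketch (not in the original): indexing both arrays with the same
# random permutation keeps each row of the features paired with its label.
_features = np.arange(10).reshape(5, 2)
_labels = np.arange(5)
_shuffled_features, _shuffled_labels = unison_shuffled_copies(_features, _labels)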

def loadData():

    # reload audio object from file
    try:
        audio_file = open(ROOT_DIR + r'/resources/audio.pkl', 'rb')
        audio_matrix, classifications = pickle.load(audio_file)
        audio_file.close()
        return unison_shuffled_copies(audio_matrix, classifications)
    except:
        # If no pickle file
Example #59
0
    "Cross product of elements in a and elements in b."
    return [s+t for s in a for t in b]


# Global variables
ROWS = 'ABCDEFGHI'
COLS = '123456789'
BOXES = cross(ROWS, COLS)
ROW_UNITS = [cross(r, COLS) for r in ROWS]
COLUMN_UNITS = [cross(ROWS, c) for c in COLS]
SQUARE_UNITS = [cross(rs, cs) for rs in ('ABC','DEF','GHI')
                              for cs in ('123','456','789')]
DIAGONALS = [[r+c for r, c in zip(ROWS, COLS)],
             [r+c for r, c in zip(ROWS[-1::-1], COLS)]]
UNITLIST = ROW_UNITS + COLUMN_UNITS + SQUARE_UNITS + DIAGONALS
UNITS = dict((s, [u for u in UNITLIST if s in u]) for s in BOXES)
PEERS = dict((s, set(sum(UNITS[s],[]))-set([s])) for s in BOXES)
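
# Illustrative check (not in the original): with the two diagonals added, boxes on
# a main diagonal such as 'A1' belong to four units while the rest belong to three,
# and boxes off the diagonals keep the usual 20 peers.
assert len(UNITLIST) == 9 + 9 + 9 + 2
assert len(UNITS['A1']) == 4 and len(UNITS['A2']) == 3
assert len(PEERS['A2']) == 20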


def assign_value(values, box, value):
    """
    Please use this function to update your values dictionary!
    Assigns a value to a given box. If the assignment reduces the box to a single value, record the updated board in assignments.
    """
    values[box] = value
    if len(value) == 1:
        assignments.append(values.copy())
    return values


def naked_twins(values):
Example #60
0
def main(imgfile, outfile, rows, cols, normalized):
    """Define regions of interest (ROIs) on an image, and return bounding boxes.
    
    Use number keys, 1 - 9, to select ROIs. ROI position can be
    adjusted with mouse or arrow keys. ROI size can be adjusted with
    mouse or shift+arrow keys.

    Assumes a grid of ROIs, but no constraints on ROI overlap.
    
    Bounding boxes are (minrow, mincol, maxrow, maxcol) to be
    consistent with skimage, and are returned in YAML format for easy
    parsing. See extractROI for a program that operates on ROIs.

    """
    img = np.squeeze(io.imread(imgfile))
    fig, main_ax = plt.subplots()
    plt.subplots_adjust(bottom=0.2)
    main_ax.imshow(img, cmap="gray")

    nplates = rows * cols
    irows, icols = img.shape

    selections = SelectorCollection()
    selections.outfile = outfile
    selections.rows = rows
    selections.cols = cols
    selections.normalized = normalized
    selections.colors = plt.cm.tab10(np.linspace(0, 1, nplates))
    selections.selectors = \
        [PersistentRectangleSelector(
            main_ax,
            selections.select_callback,
            drawtype='box',
            useblit=False,
            button=[1],
            minspanx=5, minspany=5,
            spancoords='data',
            rectprops=dict(facecolor=selections.colors[i], alpha=0.5),
            interactive=True)
         for i in range(nplates)]

    bboxes = subdivide_region((irows, icols), rows, cols)

    for i, bbox in enumerate(bboxes):
        minr, minc, maxr, maxc = bbox
        selections.selectors[i].extents = (minc, maxc, minr, maxr)
        selections.selectors[i].set_visible(True)

    selections.current = 0
    selections.activate_current()

    ax_normalize = plt.axes([0.45, 0.05, 0.2, 0.075])
    btn_normalize = Button(ax_normalize, "Normalize")
    btn_normalize.on_clicked(selections.normalize_and_update)

    ax_apply = plt.axes([0.7, 0.05, 0.2, 0.075])
    btn_apply = Button(ax_apply, "Apply and Close")
    btn_apply.on_clicked(selections.write_geometry)

    plt.connect('key_press_event', selections.choose_selector)
    plt.sca(main_ax)
    plt.show()
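
# Hypothetical invocation sketch (not part of the original script): the file names
# and grid shape are assumptions for illustration, and this assumes main() is a
# plain function (if it is wrapped by a CLI framework, invoke it from the command
# line instead).
#main('plate_image.tif', 'rois.yaml', rows=2, cols=3, normalized=False)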