Example #1
def dump(new_data):
    '''
    Replace the entire datastore with a passed data structure

    CLI Example:

    .. code-block:: bash

        salt '*' data.dump "{'eggs': 'spam'}"
    '''
    if not isinstance(new_data, dict):
        try:
            new_data = ast.literal_eval(new_data)
        except (SyntaxError, ValueError):
            return False
        if not isinstance(new_data, dict):
            return False

    try:
        datastore_path = os.path.join(__opts__['cachedir'], 'datastore')
        with salt.utils.fopen(datastore_path, 'w+b') as fn_:
            serial = salt.payload.Serial(__opts__)
            serial.dump(new_data, fn_)

        return True

    except (IOError, OSError):
        return False
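The guard in dump() accepts either a dict or a string that literal-evaluates to one. A minimal standalone sketch of that pattern (the coerce_dict helper name is hypothetical):

import ast

def coerce_dict(value):
    # Accept a dict directly, or a string that parses to one; None otherwise.
    if isinstance(value, dict):
        return value
    try:
        parsed = ast.literal_eval(value)
    except (SyntaxError, ValueError):
        return None
    return parsed if isinstance(parsed, dict) else None

assert coerce_dict("{'eggs': 'spam'}") == {'eggs': 'spam'}
assert coerce_dict('[1, 2]') is None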
Example #2
def reporte_lgtbi(request):
    """
    Diciembre 21, 2015
    Autor: Milton Lenis

    Permite conocer el numero de personal de apoyo que pertenecen a la comunidad LGTBI
    """
    tipoTenant = request.tenant.obtenerTenant()

    if tipoTenant.schema_name == 'public':
        tabla = PublicPersonalApoyoView
    else:
        tabla = TenantPersonalApoyoView

    if request.is_ajax():
        departamentos = None if request.GET['departamentos'] == 'null'  else ast.literal_eval(request.GET['departamentos'])
        genero = None if request.GET['genero'] == 'null'  else ast.literal_eval(request.GET['genero'])

        consultas = [
            "list("+tabla.__name__+".objects.filter(estado = 0,ciudad__departamento__id__in=%s,genero__in=%s).annotate(descripcion=F('lgtbi')).values('id','descripcion','entidad').annotate(cantidad=Count('id',distinct=True)))",
            "list("+tabla.__name__+".objects.filter(estado = 0,ciudad__departamento__id__in=%s).annotate(descripcion=F('lgtbi')).values('id','descripcion','entidad').annotate(cantidad=Count('id',distinct=True)))",
            "list("+tabla.__name__+".objects.filter(estado = 0,genero__in=%s).annotate(descripcion=F('lgtbi')).values('id','descripcion','entidad').annotate(cantidad=Count('id',distinct=True)))",
            "list("+tabla.__name__+".objects.filter(estado = 0).annotate(descripcion=F('lgtbi')).values('id','descripcion','entidad').annotate(cantidad=Count('id',distinct=True)))",
        ]

        lgtbi = ejecutar_casos_recursivos(consultas, departamentos, genero, tipoTenant)
        lgtbi = tipoTenant.ajustar_resultado(lgtbi)
        if True in lgtbi:
            lgtbi['PERTENECE A LA COMUNIDAD LGTBI'] = lgtbi[True]
            del lgtbi[True]
        if False in lgtbi:
            lgtbi['NO PERTENECE A LA COMUNIDAD LGTBI'] = lgtbi[False]
            del lgtbi[False]

        return JsonResponse(lgtbi)

    else:
        lgtbi = list(tabla.objects.filter(estado = 0).annotate(descripcion=F('lgtbi')).values('id','descripcion','entidad').annotate(cantidad=Count('id',distinct=True)))
        lgtbi = tipoTenant.ajustar_resultado(lgtbi)
        if True in lgtbi:
            lgtbi['PERTENECE A LA COMUNIDAD LGTBI'] = lgtbi[True]
            del lgtbi[True]
        if False in lgtbi:
            lgtbi['NO PERTENECE A LA COMUNIDAD LGTBI'] = lgtbi[False]
            del lgtbi[False]

    visualizaciones = [1, 3, 5, 6]
    form = FiltrosPersonalApoyoForm(visualizaciones=visualizaciones)

    return render(request, 'personal_apoyo/base_personal_apoyo.html', {
        'nombre_reporte' : 'Cantidad de personal de apoyo que pertenece a la comunidad LGTBI',
        'url_data' : 'reporte_lgtbi_personal_apoyo',
        'datos': lgtbi,
        'visualizaciones': visualizaciones,
        'form': form,
        'actor': 'Personal de Apoyo',
        'fecha_generado': datetime.now(),
        'nombre_columna':'Descripción'
    })
Example #3
    def send_malware(self):
        flag=True
        try:
            malware_instance = open(self.malware,'rb').read()
            hashtag= hashlib.sha1(malware_instance).hexdigest()
            malware_length = len(malware_instance)
            sock=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
            ssl_sock = ssl.wrap_socket(sock=sock,ca_certs=self.cacert,certfile=self.server_certificate,keyfile=self.server_key,cert_reqs=ssl.CERT_REQUIRED)
            ssl_sock.connect(ioc_server)
            print("Sending info for subject %s with hash value: %s and length: %d"%(self.malware,hashtag,malware_length))
            malware_info.append((hashtag,time.time()))
            ssl_sock.sendall(str(('identity',self.id,hashtag,malware_length,time.time(),self.malware)))
            data = ssl_sock.recv()
            datatuple = literal_eval(data)
            if datatuple[0]:
                print(' IoC Server Message: '+datatuple[1])
                ssl_sock.sendall(malware_instance)
                data = ssl_sock.recv()
                datatuple =literal_eval(data)
                if datatuple[0]:
                    print(' IoC Server Message: '+datatuple[1])
                else:
                    flag=False
                    print(' IoC Server Message: '+datatuple[1])
            else:
                flag=False
                print(' IoC Server Message: '+datatuple[1])
            print(data)

        except Exception as e:
            info =str(e)
            print(info)
            Logger().errorLogging(info)
Example #4
    def _signup_create_user(self, cr, uid, values, context=None):
        """ create a new user from the template user """
        ir_config_parameter = self.pool.get("ir.config_parameter")
        template_user_id = literal_eval(ir_config_parameter.get_param(cr, uid, "auth_signup.template_user_id", "False"))
        assert template_user_id and self.exists(
            cr, uid, template_user_id, context=context
        ), "Signup: invalid template user"

        # check that uninvited users may sign up
        if "partner_id" not in values:
            if not literal_eval(ir_config_parameter.get_param(cr, uid, "auth_signup.allow_uninvited", "False")):
                raise SignupError("Signup is not allowed for uninvited users")

        assert values.get("login"), "Signup: no login given for new user"
        assert values.get("partner_id") or values.get("name"), "Signup: no name or partner given for new user"

        # create a copy of the template user (attached to a specific partner_id if given)
        values["active"] = True
        context = dict(context or {}, no_reset_password=True)
        try:
            with cr.savepoint():
                return self.copy(cr, uid, template_user_id, values, context=context)
        except Exception as e:
            # copy may fail if the requested login is not available.
            raise SignupError(ustr(e))
Example #5
    def test_get(self):
        username = "******"
        email = "mailto:[email protected]"
        password = "******"
        auth = "Basic %s" % base64.b64encode("%s:%s" % (username, password))
        form = {'username':username,'email': email,'password':password,'password2':password}
        response = self.client.post(reverse(views.register), form, X_Experience_API_Version="1.0.0")

        r = self.client.get(self.url, self.testparams1, X_Experience_API_Version="1.0.0", Authorization=self.auth)
        self.assertEqual(r.status_code, 200)
        robj = ast.literal_eval(r.content)
        self.assertEqual(robj['test'], self.teststate1['test'])
        self.assertEqual(robj['obj']['agent'], self.teststate1['obj']['agent'])
        self.assertEqual(r['etag'], '"%s"' % hashlib.sha1(r.content).hexdigest())

        r2 = self.client.get(self.url, self.testparams2, X_Experience_API_Version="1.0.0", Authorization=self.auth)
        self.assertEqual(r2.status_code, 200)
        robj2 = ast.literal_eval(r2.content)
        self.assertEqual(robj2['test'], self.teststate2['test'])
        self.assertEqual(robj2['obj']['agent'], self.teststate2['obj']['agent'])
        self.assertEqual(r2['etag'], '"%s"' % hashlib.sha1(r2.content).hexdigest())
        
        r3 = self.client.get(self.url, self.testparams3, X_Experience_API_Version="1.0.0", Authorization=self.auth)
        self.assertEqual(r3.status_code, 200)
        robj3 = ast.literal_eval(r3.content)
        self.assertEqual(robj3['test'], self.teststate3['test'])
        self.assertEqual(robj3['obj']['agent'], self.teststate3['obj']['agent'])
        self.assertEqual(r3['etag'], '"%s"' % hashlib.sha1(r3.content).hexdigest())

        r4 = self.client.get(self.url, self.testparams4, X_Experience_API_Version="1.0.0", Authorization=auth)
        self.assertEqual(r4.status_code, 200)
        robj4 = ast.literal_eval(r4.content)
        self.assertEqual(robj4['test'], self.teststate4['test'])
        self.assertEqual(robj4['obj']['agent'], self.teststate4['obj']['agent'])
        self.assertEqual(r4['etag'], '"%s"' % hashlib.sha1(r4.content).hexdigest())
Example #6
def nacionalidad(request):
    """
    Noviembre 14, 2015
    Autor: Daniel Correa

    Permite conocer el numero de deportistas colombianos y extranjeros
    """
    tipoTenant = request.tenant.obtenerTenant()

    if tipoTenant.schema_name == 'public':
        tabla = PublicDeportistaView
    else:
        tabla = TenantDeportistaView

    if request.is_ajax():
        departamentos = None if request.GET['departamentos'] == 'null'  else ast.literal_eval(request.GET['departamentos'])
        genero = None if request.GET['genero'] == 'null'  else ast.literal_eval(request.GET['genero'])

        consultas = [
            "list("+tabla.__name__+".objects.filter(estado__in=[0,2],ciudad_residencia__departamento__id__in=%s,genero__in=%s).annotate(descripcion=F('nacionalidad__nombre')).values('id','descripcion','entidad').annotate(cantidad=Count('id',distinct=True)))",
            "list("+tabla.__name__+".objects.filter(estado__in=[0,2],ciudad_residencia__departamento__id__in=%s).annotate(descripcion=F('nacionalidad__nombre')).values('id','descripcion','entidad').annotate(cantidad=Count('id',distinct=True)))",
            "list("+tabla.__name__+".objects.filter(estado__in=[0,2],genero__in=%s).annotate(descripcion=F('nacionalidad__nombre')).values('id','descripcion','entidad').annotate(cantidad=Count('id',distinct=True)))",
            "list("+tabla.__name__+".objects.filter(estado__in=[0,2]).annotate(descripcion=F('nacionalidad__nombre')).values('id','descripcion','entidad').annotate(cantidad=Count('id',distinct=True)))",
        ]

        nacionalidades = ejecutar_casos_recursivos(consultas,departamentos,genero,tipoTenant)
        nacionalidades = tipoTenant.ajustar_resultado(nacionalidades)

        return JsonResponse(nacionalidades)

    else:
        nacionalidades = list(tabla.objects.filter(estado__in=[0,2]).annotate(descripcion=F('nacionalidad__nombre')).values('nombres','id','descripcion','entidad').annotate(cantidad=Count('id',distinct=True)))
        nacionalidades = tipoTenant.ajustar_resultado(nacionalidades)

    return nacionalidades
Example #7
def deferred_add_new_album_details(album_id, added, album, artist, channel, img, tags, url, users):
    try:
        if album_id not in list_model.get_list():
            list_model.add_to_list(album_id)
        albums_model.add_to_albums(album_id, artist=artist, name=album, url=url, img=img, channel=channel)
        if added:
            albums_model.update_album_added(album_id, added)
        if not img:
            deferred_process_album_cover.delay(album_id)
        if tags is not None:
            if isinstance(tags, str):
                tags = ast.literal_eval(tags)
            deferred_process_tags.delay(album_id, tags)
        else:
            deferred_process_album_tags.delay(album_id)
        if users is not None:
            if isinstance(users, str):
                users = ast.literal_eval(users)
            deferred_process_users.delay(album_id, users)
        deferred_check_album_url.delay(album_id)
    except DatabaseError as e:
        print(f'[db]: failed to add new album details for [{album_id}] {album} by {artist}')
        print(f'[db]: {e}')
    else:
        print(f'[db]: added new album details for [{album_id}] {album} by {artist}')
Example #8
File: conv2d.py Project: bddppq/tvm
def _alter_conv2d_layout(attrs, inputs, tinfos, F):

    copy_inputs = [s for s in inputs]

    data = tinfos[0]
    kernel = tinfos[1]

    import ast
    padding = ast.literal_eval(str(attrs['padding']))
    stride = ast.literal_eval(str(attrs['strides']))

    wkl = _get_workload(data, kernel, stride, padding, data.dtype)
    oc_bn = 1
    kernel_shape = util.get_const_tuple(kernel.shape)
    for oc_bn in range(16, 1, -1):
        if kernel_shape[0] % oc_bn == 0:
            break

    new_attrs = {k: attrs[k] for k in attrs.keys()}
    new_attrs["kernel_layout"] = 'OIHW%do' % (oc_bn)

    if F.__name__ == 'tvm.relay.op':
        # Derive channels for frontends (e.g. ONNX) that miss the "channels" field.
        new_attrs["channels"] = inputs[1].checked_type.shape[attrs['kernel_layout'].index('O')]

    if F.__name__ == 'nnvm.symbol':
        out = F.contrib.conv2d_NCHWc(*copy_inputs, **new_attrs)
    else:
        out = F.nn.contrib_conv2d_nchwc(*copy_inputs, **new_attrs)

    return out
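The padding and strides attributes arrive as string-like objects, so the snippet round-trips them through str() and ast.literal_eval to recover plain tuples. A minimal illustration with a made-up attribute value:

import ast

padding_attr = '(1, 1)'  # how a conv2d padding attribute may stringify (assumption)
padding = ast.literal_eval(str(padding_attr))
assert padding == (1, 1)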
Example #9
    def run(self):
        """Run the directive."""
        if pygal is None:
            msg = req_missing(['pygal'], 'use the Chart directive', optional=True)
            return [nodes.raw('', '<div class="text-error">{0}</div>'.format(msg), format='html')]
        options = {}
        if 'style' in self.options:
            style_name = self.options.pop('style')
        else:
            style_name = 'BlueStyle'
        if '(' in style_name:  # Parametric style
            style = eval('pygal.style.' + style_name)
        else:
            style = getattr(pygal.style, style_name)
        for k, v in self.options.items():
            options[k] = literal_eval(v)

        chart = getattr(pygal, self.arguments[0])(style=style)
        chart.config(**options)
        for line in self.content:
            label, series = literal_eval('({0})'.format(line))
            chart.add(label, series)
        data = chart.render().decode('utf8')
        if _site and _site.invariant:
            import re
            data = re.sub(r'id="chart-[a-f0-9\-]+"', 'id="chart-foobar"', data)
            data = re.sub(r'#chart-[a-f0-9\-]+', '#chart-foobar', data)
        return [nodes.raw('', data, format='html')]
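Each content line of the directive is wrapped in parentheses so that literal_eval parses it as a (label, series) tuple. A quick illustration of that trick with a sample line:

from ast import literal_eval

line = "'Firefox', [None, None, 0, 16.6, 25, 31]"
label, series = literal_eval('({0})'.format(line))
assert label == 'Firefox'
assert series[3] == 16.6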
Example #10
 def on_privmsg(self, nick, chan, msg):
     if chan == "#osp" and msg.startswith("!"):
         p = r"^([a-zA-Z]+)\((.+)*\)"
         r = re.search(p, msg[1:])
         if r is None:
             return
         else:
             cmd, args = r.groups()
         if cmd in methodList:
             scribus.setRedraw(False)
             try:
                 if args is None:
                     r = getattr(scribus, cmd)()
                 elif type(ast.literal_eval(args)) is tuple:
                     r = getattr(scribus, cmd)(*ast.literal_eval(args))
                 else:
                     r = getattr(scribus, cmd)(ast.literal_eval(args))
                 self.privmsg(chan, "called %s" % cmd)
                 self.privmsg(chan, "returned %s" % r if r is not None else "nothing")
                 # Ugly workaround to force scribus refreshing
                 scribus.zoomDocument(101)
                 scribus.zoomDocument(100)
             except TypeError:
                 self.privmsg(chan, "%s" % getattr(scribus, cmd).__doc__)
             scribus.setRedraw(True)
         elif cmd == "help":
             for i in info(scribus):
                 self.privmsg(chan, "%s" % i)
         else:
             self.privmsg(chan, "No such a command: %s" % cmd)
Example #11
def incrementclientip():
    netparts = []
    iplist = []
    startip = 2
    
    #p = subprocess.Popen(['/usr/local/openvpn_as/scripts/confdba', '-s'], stdout=subprocess.PIPE,stderr=subprocess.PIPE)
    #tmpdba, err = p.communicate()
    #confdba = ast.literal_eval(tmpdba)
    
    p = subprocess.Popen(['cat', '/root/bin/confdbas.txt'], stdout=subprocess.PIPE,stderr=subprocess.PIPE)
    tmpdba, err = p.communicate()
    confdba = ast.literal_eval(tmpdba)
    
    for key1,val1 in confdba.items():
        if key1 == 'Default':
            if isinstance(val1, dict):
                for key2,val2 in val1.items():
                    if key2 == 'vpn.server.static.0.network':
                        network = val1.get('vpn.server.static.0.network', None)
                        netmask = val1.get('vpn.server.static.0.netmask_bits', None)
                        
    startipdec = ip2int(network)                        #decimal form of the network address
    maxclients = pow(2,(32 - int(netmask))) - startip   #decimal form of the number of hosts with given mask
    print(maxclients)
    minipdec = startipdec + startip + 8192                        #lowest ip in decimal format
    maxipdec = startipdec + maxclients + startip - 2 + 8192      #highest ip in decimal format
    minip = int2ip(minipdec)                                #lowest ip in dotted notation
    maxip = int2ip(maxipdec)                                #highest ip in dotted notation

    #p = subprocess.Popen(['/usr/local/openvpn_as/scripts/confdba', '-us'], stdout=subprocess.PIPE,stderr=subprocess.PIPE)
    #tmpdba, err = p.communicate()
    #userdba = ast.literal_eval(tmpdba)

    p = subprocess.Popen(['cat', '/root/bin/confdbaus.txt'], stdout=subprocess.PIPE,stderr=subprocess.PIPE)
    tmpdba, err = p.communicate()
    userdba = ast.literal_eval(tmpdba)

    for key1,val1 in userdba.items():
        if isinstance(val1, dict):
            for key2,val2 in val1.items():
                usertype = val1.get(key2, val2)
                if usertype == 'user_connect' or usertype == 'user_compile':
                    userip = val1.get('conn_ip', None)
                    if userip:
                        try:
                            socket.inet_aton(userip)  #Makes sure the groups are in IPv4 network form for sorted below
                            iplist.append(userip)
                        except socket.error:
                            pass
    
    for seqdec in range(minipdec, maxipdec):
        seqip = int2ip(seqdec)
        if checkip(seqip, iplist):
            pass
        else:
            newclientip = seqip
            break
    
    print(newclientip)
    return newclientip
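ip2int and int2ip are helpers the snippet assumes; a sketch of conventional implementations on top of the standard socket and struct modules:

import socket
import struct

def ip2int(addr):
    # Dotted-quad string -> unsigned 32-bit integer.
    return struct.unpack('!I', socket.inet_aton(addr))[0]

def int2ip(num):
    # Unsigned 32-bit integer -> dotted-quad string.
    return socket.inet_ntoa(struct.pack('!I', num))

assert int2ip(ip2int('10.0.0.1')) == '10.0.0.1'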
Example #12
def sentences_by_rule():
    if request.method == 'GET':
        topic = ast.literal_eval(request.args.get('topic'))
        sources_ids = ast.literal_eval(request.args.get('sources'))
        format_string = formatstring(sources_ids)
        isRuleset = request.args.get('isRuleset') == 'true'
        rule_id = ast.literal_eval(request.args.get('rule_id'))
        fromDate = format_fromDate(ast.literal_eval(request.args.get('fromDate')))
        toDate = format_toDate(ast.literal_eval(request.args.get('toDate')))

        app.logger.info('GET sentences_by_rule: topic(%s), sources_ids(%s), isRuleset(%s), ruleset_or_rule_id(%s)'
                        % ('%s', format_string, '%s', '%s')
                        % tuple([topic] + sources_ids + [isRuleset, rule_id]))

        db = g.db
        cur = db.cursor()

        sentences = []
        if isRuleset:
            cur.execute(queries['get_sentences_by_ruleset'].format(fromDate, toDate)
                        % ('%s', '%s', format_string), [rule_id, topic] + sources_ids)
            sentences.extend([dict(sentence_id  = row[0],
                                   full_text    = row[1],
                                   rules        = map(int, row[2].split(',')))
                              for row in cur.fetchall()])
        else:
            cur.execute(queries['get_sentences_by_rule'].format(fromDate, toDate)
                        % ('%s', '%s', format_string), [rule_id, topic] + sources_ids)
            sentences.extend([dict(sentence_id  = row[0],
                                   full_text    = row[1],
                                   rules        = [])
                              for row in cur.fetchall()])

        return jsonify(sentences=sentences)
Example #13
def rulesets():
    if request.method == 'POST':
        topic = ast.literal_eval(request.form['topic'])
        name = request.form['name']

        app.logger.info('POST rulesets: topic(%s), name(%s)' % (topic, name))

        db = g.db
        cur = db.cursor()
        cur.execute(queries['add_ruleset'], (topic, name, topic))
        db.commit()
        cur.execute(queries['get_ruleset'], (topic, ))
        rulesets = [dict(category_seq=int(row[0]), name=row[1]) for row in cur.fetchall()]
        return jsonify(rulesets=rulesets)

    if request.method == 'DELETE':
        topic = ast.literal_eval(request.form['topic'])
        category_seq = ast.literal_eval(request.form['category_seq'])

        app.logger.info('DELETE rulesets: topic(%s), category_seq(%s)' % (topic, category_seq))

        db = g.db
        cur = db.cursor()
        cur.execute(queries['get_rules'], (topic, category_seq))
        rd = g.rd
        for row in cur.fetchall():
            rd.delete(int(row[0]))
        cur.execute(queries['del_rules_word_relations'], (topic, category_seq))
        cur.execute(queries['del_rules_sentence_relations'], (topic, category_seq))
        cur.execute(queries['del_rules'], (topic, category_seq))
        cur.execute(queries['del_ruleset'], (topic, category_seq))
        db.commit()
        return jsonify(deleted=dict(topic=topic, category_seq=category_seq))
Example #14
    def test_get(self):
        r = self.client.get(reverse(agent_profile), self.testparams1, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
        self.assertEqual(r.status_code, 200)
        
        robj = ast.literal_eval(r.content)
        self.assertEqual(robj['test'], self.testprofile1['test'])
        self.assertEqual(robj['obj']['agent'], self.testprofile1['obj']['agent'])
        self.assertEqual(r['etag'], '"%s"' % hashlib.sha1('%s' % self.testprofile1).hexdigest())

        r2 = self.client.get(reverse(agent_profile), self.testparams2, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
        self.assertEqual(r2.status_code, 200)
        robj2 = ast.literal_eval(r2.content)
        self.assertEqual(robj2['test'], self.testprofile2['test'])
        self.assertEqual(robj2['obj']['agent'], self.testprofile2['obj']['agent'])
        self.assertEqual(r2['etag'], '"%s"' % hashlib.sha1('%s' % self.testprofile2).hexdigest())
        
        r3 = self.client.get(reverse(agent_profile), self.testparams3, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
        self.assertEqual(r3.status_code, 200)
        robj3 = ast.literal_eval(r3.content)
        self.assertEqual(robj3['test'], self.testprofile3['test'])
        self.assertEqual(robj3['obj']['agent'], self.testprofile3['obj']['agent'])
        self.assertEqual(r3['etag'], '"%s"' % hashlib.sha1('%s' % self.testprofile3).hexdigest())

        r4 = self.client.get(reverse(agent_profile), self.testparams4, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
        self.assertEqual(r4.status_code, 200)
        robj4 = ast.literal_eval(r4.content)
        self.assertEqual(robj4['test'], self.otherprofile1['test'])
        self.assertEqual(robj4['obj']['agent'], self.otherprofile1['obj']['agent'])
        self.assertEqual(r4['etag'], '"%s"' % hashlib.sha1('%s' % self.otherprofile1).hexdigest())
Example #15
def syncfiles(args):
    if len(args) < 3:
        printtofile ('args not enough, need: files src_host dest_hosts')
        return
    dest_hosts = []
    src_host = ''
    files = ast.literal_eval(args[2])
    if len(args) >= 4:
        src_host = args[3]
    if len(args) >= 5:
        dest_hosts = ast.literal_eval(args[4])
    if len(dest_hosts) == 0:
        if len(src_host) == 0:
            dest_hosts = primary_host + replicas_host
        else:
            dest_hosts = replicas_host

    for file in files:
        fullfilepath = ''
        if (src_host == ''):
            fullfilepath = base_dir + '/' + path.abspath(file).replace(path.abspath(local_base), '')
            printtofile ('local to remote : ' + fullfilepath)
        else:
            fullfilepath = base_dir + '/' + file
        for dest_host in dest_hosts:
            dest_host_path = loginuser + '@' + dest_host + ':' + fullfilepath
            if src_host == '':
                # using local file
                os.system(scp_local + file + ' ' + dest_host_path)
            else:
                os.system(scp_remote + src_host + ':' + fullfilepath + ' ' + dest_host_path)
    printtofile ('finished.')
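syncfiles expects its list arguments to arrive on the command line as Python literals. A hedged illustration of how such argv entries parse (values are made up):

import ast

argv = ['deploy.py', 'syncfiles', "['a.txt', 'b.txt']", 'host1', "['host2', 'host3']"]
files = ast.literal_eval(argv[2])
dest_hosts = ast.literal_eval(argv[4]) if len(argv) >= 5 else []
assert files == ['a.txt', 'b.txt']
assert dest_hosts == ['host2', 'host3']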
Example #16
    def set_options(self):
        self.from_types = set(x.strip() for x in
                           self.options.get("from_types", "").split(",")
                           if x)
        # If the types to act on are container types
        # enabling this will put the referenced image inside the
        # current item - else, its relative path will be kept
        self.embed_images = ast.literal_eval(self.options.get(
                                             "embed_images", "False"))
        # NB: embed_images will fail for ordinary Documents or news items
        # as it is intended to be used for folderish content types.
        # It is designed to work if the item will only become
        # folderish later on the pipeline, though.

        # Some of the images may have "resolveuid" links which
        # are redirected to the proper value by the source Plone site
        # (if it is the case)
        # Enabling this replaces redirected urls in the "SRC" attribute
        # of <img tags for the final value.
        self.replace_references = ast.literal_eval(self.options.get(
                                             "replace_references", "True"))
        self.source_prefix = self.options.get("source_prefix",
            "http://example.com/some_plone_site").strip().rstrip("/")
        # not used.
        self.load_external_images = False
        # Wormhole is a construct depending on the
        # sc.transmogrifier.utils.whitehole blueprint,
        # which makes forked items (in this case, fetched images)
        # appear earlier on the pipeline instead of just
        # in the blueprints ahead of this one.
        self.use_wormhole = ast.literal_eval(self.options.get(
                                 "use_wormhole", "False"))
        self.use_jsonmigrator = ast.literal_eval(self.options.get(
                                 "use_jsonmigrator", "False"))
Example #17
    def service_create(self, context, service):
        LOG.debug("service_create")
        self.k8s_api = k8s.create_k8s_api(context, service.bay_uuid)
        manifest = k8s_manifest.parse(service.manifest)
        try:
            resp = self.k8s_api.create_namespaced_service(body=manifest,
                                                          namespace='default')
        except rest.ApiException as err:
            raise exception.KubernetesAPIFailed(err=err)

        if resp is None:
            raise exception.ServiceCreationFailed(bay_uuid=service.bay_uuid)

        service['uuid'] = resp.metadata.uid
        service['name'] = resp.metadata.name
        service['labels'] = ast.literal_eval(resp.metadata.labels)
        service['selector'] = ast.literal_eval(resp.spec.selector)
        service['ip'] = resp.spec.cluster_ip
        service_value = []
        for p in resp.spec.ports:
            ports = p.to_dict()
            if not ports['name']:
                ports['name'] = 'k8s-service'
            service_value.append(ports)

        service['ports'] = service_value

        return service
Example #18
 def test_parse_in_error(self):
     try:
         1/0
     except Exception:
         with self.assertRaises(SyntaxError) as e:
             ast.literal_eval(r"'\U'")
         self.assertIsNotNone(e.exception.__context__)
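The same behaviour can be checked outside the unittest harness: literal_eval surfaces a truncated \U escape as a SyntaxError raised while parsing the string:

import ast

try:
    ast.literal_eval(r"'\U'")
except SyntaxError as exc:
    print('rejected:', exc)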
Example #19
    def _bind_to_resource(self, resource_key):
        """This function binds the Kernel to a specific resource defined in
           "resource_key".
        """
        if resource_key not in _KERNEL_INFO_HS["machine_configs"]:
            if "*" in _KERNEL_INFO_HS["machine_configs"]:
                # Fall-back to generic resource key
                resource_key = "*"
            else:
                raise NoKernelConfigurationError(kernel_name=_KERNEL_INFO_HS["name"], resource_key=resource_key)

        cfg = _KERNEL_INFO_HS["machine_configs"][resource_key]
        executable = "python"
        ARG1 = self.get_arg("--inputfile1=")
        ARG2 = self.get_arg("--inputfile2=")

        ARG1 = ast.literal_eval(ARG1)
        ARG2 = ast.literal_eval(ARG2)

        arguments  = [self.get_arg("--dist_file="),"--element_set1"]
        arguments.extend(ARG1)
        arguments.extend(["--element_set2"])
        arguments.extend(ARG2)
        arguments.extend(["--output_file",self.get_arg("--outputfile=")])

        self._executable  = executable
        self._arguments   = arguments
        self._environment = cfg["environment"]
        self._uses_mpi    = cfg["uses_mpi"]
        self._pre_exec    = cfg["pre_exec"]
        self._post_exec   = None
Example #20
def get(function, duration, *args):
    # type: (function, int, object) -> object or None
    """
    Gets cached value for provided function with optional arguments, or executes and stores the result
    :param function: Function to be executed
    :param duration: Duration of validity of cache in hours
    :param args: Optional arguments for the provided function
    """

    try:
        key = _hash_function(function, args)
        cache_result = cache_get(key)
        if cache_result:
            if _is_cache_valid(cache_result['date'], duration):
                return ast.literal_eval(cache_result['value'].encode('utf-8'))

        fresh_result = repr(function(*args))
        if not fresh_result:
            # If the cache is old, but we didn't get fresh result, return the old cache
            if cache_result:
                return cache_result
            return None

        cache_insert(key, fresh_result)
        return ast.literal_eval(fresh_result.encode('utf-8'))
    except Exception:
        return None
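Because the cache stores repr(result) and rebuilds it with literal_eval, only literal-composable values (numbers, strings, tuples, lists, dicts, sets, booleans, None) round-trip. A small check of that constraint:

import ast

original = {'ids': [1, 2, 3], 'name': 'demo'}
assert ast.literal_eval(repr(original)) == original

try:
    ast.literal_eval(repr(object()))  # '<object object at 0x...>' is not a literal
except (SyntaxError, ValueError):
    pass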
Example #21
def get_scheduler_details(request, name):
    logger.debug("Retrieving Scheduler %s information" % name)
    scheduler = Scheduler.objects.get(name=name)
    try:
        celery_task = djcelery.models.TaskState.objects.get(task_id=scheduler.task_uuid)
        celery_result = ast.literal_eval(celery_task.result)
        servers_response = celery_result[0]["messages"]
    except Exception:
        logger.error("Cannot find celery task %s in database" % scheduler.task_uuid)
        servers_response = []
    tasks_list = []
    for task in scheduler.tasks.iterator():
        if task.result:
            result = ast.literal_eval(task.result)
        else:
            result = []
        task_obj = {"state": task.status,
                    "order": task.order,
                    "name": task.name,
                    "filter": task.filters,
                    "agent": task.agent,
                    "action": task.action,
                    "parameters": task.parameters,
                    "run_at": task.run_at,
                    "servers_response": result,
                    "sched_name":scheduler.name
                    }
        tasks_list.append(task_obj)
    return HttpResponse(render_to_string('widgets/chain/scheddetails.html', {'tasks': tasks_list}, context_instance=RequestContext(request)))
Example #22
def data(request):
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'choice.settings')
    django.setup()
    if request.method == "POST":
        compType = request.POST["compType"]
        if compType[-1] == ' ':
            compType = compType[:len(compType)-1]
        # Create the HttpResponse object with the appropriate CSV header.
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename="{0}.csv"'.format(compType)
        writer = csv.writer(response)
        # retrieve the correct dataset
        samples = choiceData.objects.filter(compType=compType)
        writer.writerow(['ID', 'Username', 'Type', 'Choice1', 'Choice2', 'Selection', 'TTChoose', 'Familiar', 'Time of Survey Completion', 'IP'])
        count = 0
        for sample in samples:
            count += 1
            username = str(sample.username).replace('"','')
            CType = str(sample.compType).replace('"','')
            matchups = ast.literal_eval(sample.matchupNames)
            selection = ast.literal_eval(sample.choices)
            TTChoose = ast.literal_eval(sample.TTChoose)
            familiarity = sample.familiarity
            CTime = ast.literal_eval(sample.CTime)
            ip = sample.ip
            for i in range(len(selection)):
                row = [count, username, CType, matchups[i][0], matchups[i][1], selection[i], TTChoose[i], familiarity, CTime, ip]
                writer.writerow(row)
        return response
Example #23
def custom_tecov2_nee(task_id,site,filename,years,forecast,upload):#,modWeather):
    # Header row
    header='Year DOY hour     T_air q1    precip q2       PAR q3     R_net q4    ustar         NEE filled_NEE         LE  filled_LE\n'
    head =['Year','DOY','hour','T_air','q1','precip','q2','PAR','q3','R_net','q4','ustar','NEE','filled_NEE','LE','filled_LE']
    #fixed width list of values
    wd=[4,4,5,11,2,11,2,11,2,11,2,11,11,11,11,11]
    #set working directory
    wkdir = basedir + "celery_data/" + str(task_id)
    #wkdir = "/home/mstacy/test"
    os.chdir(wkdir)
    #open file and set header
    outfile = open(filename,"w")
    outfile.write(header)
    #open mongo connection
    db = Connection(mongoHost).teco
    #safe eval to get start and end dates
    yr=ast.literal_eval(years)
    start = yr[0]
    end = yr[1] 
    #figure time step currently only working for Hourly and half hourly
    #stepRes=db.forcing.find({"Site":site}).sort([('observed_date',1),('hour',1)]).limit(2)
    #step=stepRes[1]['hour']-stepRes[0]['hour']
    #if step == 0.5:
    #    stepdenom=2
    #else:
    #stepdenom=1
    #safe eval forecast to list of tuples
    forc = ast.literal_eval(forecast)
    set_nee_data(db,site,head,wd,outfile,start,end,forc,upload)#,stepdenom,modWeather)
Example #24
 def getFiles(self, taskname, filetype):
     self.logger.debug("Calling jobmetadata for task %s and filetype %s" % (taskname, filetype))
     binds = {'taskname': taskname, 'filetype': filetype}
     rows = self.api.query(None, None, self.FileMetaData.GetFromTaskAndType_sql, **binds)
     for row in rows:
         yield {'taskname': taskname,
                'filetype': filetype,
                'pandajobid': row[0],
                'outdataset': row[1],
                'acquisitionera': row[2],
                'swversion': row[3],
                'inevents': row[4],
                'globaltag': row[5],
                'publishname': row[6],
                'location': row[7],
                'tmplocation': row[8],
                'runlumi': literal_eval(row[9].read()),
                'adler32': row[10],
                'cksum': row[11],
                'md5': row[12],
                'lfn': row[13],
                'filesize': row[14],
                'parents': literal_eval(row[15].read()),
                'state': row[16],
                'created': str(row[17]),}
Example #25
def custom_tecov2_setup(task_id,site,filename,years,forecast,modWeather,upload):
    # Header row
    header='Year  DOY  hour  T_air q1   Q_air  q2   Wind_speed q3     Precip   q4   Pressure   q5  R_global_in q6   R_longwave_in q7   CO2'
    head =['Year','DOY','hour','T_air','q1','Q_air','q2','Wind_speed','q3','Precip','q4','Pressure','q5',
            'R_global_in','q6','R_longwave_in','q7','CO2']
    #fixed width list of values
    wd=[4,5,7,14,2,14,2,14,2,14,2,14,2,14,2,14,2,11]
    #set working directory
    wkdir = basedir + "celery_data/" + str(task_id)
    #wkdir = "/home/mstacy/test"
    os.chdir(wkdir)
    #open file and set header
    outfile = open(filename,"w")
    outfile.write(header + '\n\n')
    #open mongo connection
    db = Connection(mongoHost).teco
    #safe eval to get start and end dates
    yr=ast.literal_eval(years)
    start = datetime(yr[0],1,1)
    end = datetime(yr[1] + 1,1,1)
    #figure time step currently only working for Hourly and half hourly
    if upload:
        stepRes=db.uploaded_data.find({"Site":site}).sort([('observed_date',1),('hour',1)]).limit(2)
    else:
        stepRes=db.forcing.find({"Site":site}).sort([('observed_date',1),('hour',1)]).limit(2)
    step=stepRes[1]['hour']-stepRes[0]['hour']
    if step == 0.5:
        stepdenom=2
    else:
        stepdenom=1
    #safe eval forecast to list of tuples
    forc = ast.literal_eval(forecast)
    set_input_data(db,site,head,wd,outfile,start,end,forc,stepdenom,modWeather,upload)
Example #26
def merge_pieces(files, ip):
    ip = ip.replace("/","_")
    merge = {}
    if len(files) > 1:
        for index in range(len(files)):
            if index == len(files) - 1:
                filename = "fast_scaning_reports_"+ str(ip)
                report = open(filename, 'w')
                report.write(str(merge))
                report.close()
                return merge
            if index == 0:  
                input_1 = open(files[index]).read()
                os.remove(files[index])
                input_2 = open(files[index + 1]).read()
                os.remove(files[index + 1])
                input_1_dict = ast.literal_eval(input_1)
                input_2_dict = ast.literal_eval(input_2)
                merge = combineDicts(input_1_dict, input_2_dict)
            if index > 0:
                input_2 = open(files[index + 1]).read()
                os.remove(files[index + 1])
                input_2_dict = ast.literal_eval(input_2)
                merge = combineDicts(merge, input_2_dict)
    if len(files) == 1:
        os.rename(files[0], 'fast_scaning_reports_' + str(ip))
Example #27
    def get_os(self):
        cmd = 'python -c "import platform; raw = list(platform.dist());raw.append(platform.release());print raw"'
        data_out, data_err = self.execute(cmd)
        get_os_info = {}
        if not data_err:
            if 'command not found' not in data_out[0]:  # because some distros sport python3 by default!
                self.os, ver, release, kernel_version = ast.literal_eval(data_out[0])
                get_os_info['os'] = self.os
                get_os_info['osver'] = ver
                get_os_info['osverno'] =  kernel_version
                self.devargs.update({'os_info': get_os_info})
            else:
                cmd = 'python3 -c "import platform; raw = list(platform.dist());' \
                      'raw.append(platform.release());print (raw)"'
                data_out, data_err = self.execute(cmd)
                if not data_err:
                    self.os, ver, release, kernel_version = ast.literal_eval(data_out[0])
                    get_os_info['os'] = self.os
                    get_os_info['osver'] = ver
                    get_os_info['osverno'] =  kernel_version
                    self.devargs.update({'os_info': get_os_info})
                else:
                    if self.debug:
                        print('\t[-] Could not get OS info from host %s. Message was: %s' % (
                            self.machine_name, str(data_err)))

        else:
            if self.debug:
                print('\t[-] Could not get OS info from host %s. Message was: %s' % (self.machine_name, str(data_err)))
Example #28
	def __init__(self, request, is_celery):
		self.privacy_level = request['privacy_level']
		self.epsilon = float(request['epsilon'])
		self.min_freq = float(request['min_freq']) if 'min_freq' in request.keys() else 0.
		self.exp_round = request['exp_round'] if 'exp_round' in request.keys() else None

		self.dp_id = request['dp_id']
		task = get_object_or_404(Task, pk = request['task_id']) if is_celery else request['task_id']
		self.task_id = task.task_id
		self.eps1_level = task.eps1_level
		self.data_path = task.data_path

		self.jtree_strct = ast.literal_eval(str(task.jtree_strct))
		self.opted_cluster = ast.literal_eval(str(task.opted_cluster))
		
		self.edges = ast.literal_eval(str(task.dep_graph))
		self.domain = task.domain if isinstance(task.domain, dict) else collections.OrderedDict(ast.literal_eval(task.domain)) # This is the coarsened domain
		self.valbin_map = ast.literal_eval(str(task.valbin_map))
		self.selected_attrs = task.selected_attrs if isinstance(task.selected_attrs, dict) else self.convert_selected_attrs(task.selected_attrs)
		self.white_list = ast.literal_eval(str(task.white_list))
		self.nodes = self.domain.keys()

		self.histogramdds = None
		self.data = None
		self.sim_df = None
		self.statistics_err = None
		self.generalized_dataframe = None
		self.synthetic_path = None
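The task columns above are stored as stringified Python structures and recovered with literal_eval(str(...)). A small round-trip illustration (the column value is made up):

import ast

jtree_strct_column = "[('A', 'B'), ('B', 'C')]"  # as stored in the DB (assumption)
jtree_strct = ast.literal_eval(str(jtree_strct_column))
assert jtree_strct == [('A', 'B'), ('B', 'C')]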
Example #29
  def start(self):
            fp=open(self.fileis)
            error=" "
            r=1
            for i, line in enumerate(fp):
              try:
                #print i
                if i==1000:
                    break

                
                if len(line)>10:
                    line=line.replace("\"\"","\"")
                    line=line.replace("\"{","{")
                    line=line.replace("}\"","}")
                    if len(self.keycol)==0:
                      self.makeColumn(ast.literal_eval(line))
                    self.addTosheet(ast.literal_eval(line),r)
                r += 1
              except:
                  print("Exception acr")
                  pass
            print("saving file ...........")
            self.workbook.close()
            print("file saved")
Example #30
def getMatch(startup):
    """Find top three matches for the given startup based on both Euclidean distance
    and Cosine similarity.
    
    Returns a tuple of grand total top matches, and top matches for each metric.
    Also returns basic site info for the matches.
    """

    vcList = loadVCList()
    scoresEuc, scoresCos = list(), list()

    for vc in vcList:
        # euclidean
        scoreEuc = eucDist(startup.fingerprint, ast.literal_eval(vc[1]))
        scoresEuc.append(tuple([vc[0], scoreEuc]))

        # cosine
        scoreCos = cosSim(startup.fingerprint, ast.literal_eval(vc[1]))
        scoresCos.append(tuple([vc[0], scoreCos]))

    scoresEuc = sorted(scoresEuc, key = itemgetter(1))[:3]
    scoresCos = sorted(scoresCos, key = itemgetter(1), reverse = True)[:3]

    scoresNetList = getNet(list((scoresEuc, scoresCos)))
    
    scoresNet = [vc for vc in vcList if vc[0] in scoresNetList[::-1]]
    
    return tuple((scoresNet, scoresEuc, scoresCos))
Example #31
def check_literal_eval(x):
    """Tenta identificar um tipo Python na string x - senão retorna x"""
    try:
        return literal_eval(x)
    except Exception:
        return x
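Usage: the helper returns the parsed value when x holds a valid Python literal and falls back to the original string otherwise:

assert check_literal_eval('42') == 42
assert check_literal_eval("[1, 'a']") == [1, 'a']
assert check_literal_eval('not a literal') == 'not a literal'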
Example #32
def convert_ftweets_to_dict(count):
	for i in range(0, count):
		tweets_file.append(ast.literal_eval(ftweets[i]))
Example #33
user='******'
file='a.md'
content=[[]]


print('\nread_file block----------------------\n')
try: read_file(file,content)
except FileNotFoundError: 
    # # errs = open("logs",'r')#.read()#logs.open().read
    z="logs"

    err = {}
    err['datenow()']="1no such file"

    errs = a.literal_eval(open(z,'r').read())
    errs.update(err)

    # errs.update(err)
    print(errs)

    open(z,'w').write(str(errs))
    # f.write(temp)

    # open("w").write(err)
    
    # z='logs'
    # errs={}
    # f = open(z,"r")
    # errs = a.literal_eval(f.read())
    # f.close()
Example #34
def hdfgroup2dict(group, dictionary=None, lazy=False):
    if dictionary is None:
        dictionary = {}
    for key, value in group.attrs.items():
        if isinstance(value, bytes):
            value = value.decode()
        if isinstance(value, (np.string_, str)):
            if value == '_None_':
                value = None
        elif isinstance(value, np.bool_):
            value = bool(value)
        elif isinstance(value, np.ndarray) and value.dtype.char == "S":
            # Convert strings to unicode
            value = value.astype("U")
            if value.dtype.str.endswith("U1"):
                value = value.tolist()
        # skip signals - these are handled below.
        if key.startswith('_sig_'):
            pass
        elif key.startswith('_list_empty_'):
            dictionary[key[len('_list_empty_'):]] = []
        elif key.startswith('_tuple_empty_'):
            dictionary[key[len('_tuple_empty_'):]] = ()
        elif key.startswith('_bs_'):
            dictionary[key[len('_bs_'):]] = value.tostring()
        # The following two elif statements enable reading date and time from
        # v < 2 of HyperSpy's metadata specifications
        elif key.startswith('_datetime_date'):
            date_iso = datetime.date(
                *ast.literal_eval(value[value.index("("):])).isoformat()
            dictionary[key.replace("_datetime_", "")] = date_iso
        elif key.startswith('_datetime_time'):
            date_iso = datetime.time(
                *ast.literal_eval(value[value.index("("):])).isoformat()
            dictionary[key.replace("_datetime_", "")] = date_iso
        else:
            dictionary[key] = value
    if not isinstance(group, h5py.Dataset):
        for key in group.keys():
            if key.startswith('_sig_'):
                from hyperspy.io import dict2signal
                dictionary[key[len('_sig_'):]] = (dict2signal(
                    hdfgroup2signaldict(group[key], lazy=lazy)))
            elif isinstance(group[key], h5py.Dataset):
                dat = group[key]
                kn = key
                if key.startswith("_list_"):
                    ans = np.array(dat)
                    ans = ans.tolist()
                    kn = key[6:]
                elif key.startswith("_tuple_"):
                    ans = np.array(dat)
                    ans = tuple(ans.tolist())
                    kn = key[7:]
                elif dat.dtype.char == "S":
                    ans = np.array(dat)
                    try:
                        ans = ans.astype("U")
                    except UnicodeDecodeError:
                        # There are some strings that must stay in binary,
                        # for example dill pickles. This will obviously also
                        # let "wrong" binary string fail somewhere else...
                        pass
                elif lazy:
                    ans = da.from_array(dat, chunks=dat.chunks)
                else:
                    ans = np.array(dat)
                dictionary[kn] = ans
            elif key.startswith('_hspy_AxesManager_'):
                dictionary[key[len('_hspy_AxesManager_'):]] = AxesManager([
                    i for k, i in sorted(
                        iter(hdfgroup2dict(group[key], lazy=lazy).items()))
                ])
            elif key.startswith('_list_'):
                dictionary[key[7 + key[6:].find('_'):]] = \
                    [i for k, i in sorted(iter(
                        hdfgroup2dict(
                            group[key], lazy=lazy).items()
                    ))]
            elif key.startswith('_tuple_'):
                dictionary[key[8 + key[7:].find('_'):]] = tuple([
                    i for k, i in sorted(
                        iter(hdfgroup2dict(group[key], lazy=lazy).items()))
                ])
            else:
                dictionary[key] = {}
                hdfgroup2dict(group[key], dictionary[key], lazy=lazy)
    return dictionary
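The _datetime_ branches rebuild date and time objects from their repr: the argument tuple after the opening parenthesis is literal-evaluated and splatted back into the constructor. A minimal round-trip of the date case:

import ast
import datetime

value = repr(datetime.date(2015, 12, 21))  # "datetime.date(2015, 12, 21)"
date_iso = datetime.date(*ast.literal_eval(value[value.index("("):])).isoformat()
assert date_iso == '2015-12-21'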
Example #35
def str_to_list(st):
    return ast.literal_eval(st.strip())
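Usage, including the leading/trailing whitespace the strip() call tolerates:

assert str_to_list('[1, 2, 3]') == [1, 2, 3]
assert str_to_list(' [1, 2, 3] \n') == [1, 2, 3]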
Example #36
# load vector representations
if vecoption == 'tf':
    filename = folder + '/tf.txt'
elif vecoption == 'tfidf':
    filename = folder + '/tfidf.txt'
else:
    raise ValueError('wrong vector model name: ' + vecoption)
with open(filename) as json_file:
    vec = json.load(json_file)

wordset = set()
gestureset = set()

for key, value in vec.items():
    li = ast.literal_eval(key)
    gestureset.add(li[0])
    wordset.add((li[1], li[2], li[3]))

w2i = {}
for idx, word in enumerate(wordset):
    w2i[word] = idx

gesturelist = sorted([int(v) for v in gestureset])
f2i = {}  # map from document to index
i2f = {}  # map from index to document
for idx, finset in enumerate(gesturelist):
    f2i[str(finset)] = idx
    i2f[idx] = str(finset)

# transform vector in dictionary to a matrix (row: word, column: file)
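The keys of vec are themselves stringified Python lists, which is why each one is parsed back with literal_eval before the gesture id and word triple are split out. A made-up key for illustration:

import ast

key = "['1', 'sensor1', 'X', 42]"  # hypothetical '[gesture, component, axis, word]' key
li = ast.literal_eval(key)
gesture, word = li[0], (li[1], li[2], li[3])
assert gesture == '1'
assert word == ('sensor1', 'X', 42)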
Example #37
def submitAnswer(request, code):
	context={"success": False}

	if not request.user.is_authenticated:
		context["error"]="You are not logged in"
		context["success"]=False
		return JsonResponse(context)


	if request.method == "POST":
		trivia = Trivia.objects.filter(code=code).first()
		if trivia and not trivia.locked:
			result = TriviaResult.objects.filter(user=request.user, trivia=trivia).first()
			if result:
				time_elapsed_for_contest = None

				now = get_current_time()
				if trivia.individual_timing:
					time_elapsed_for_contest = int((now-result.start_time).total_seconds())
				else:
					time_elapsed_for_contest = int((now-trivia.start_time).total_seconds())


				if time_elapsed_for_contest<trivia.duration and not submitted(result):
					q_id = int(request.POST.get("q_id"))
					opt_id = int(request.POST.get("opt_id"))
					q_ind = int(request.POST.get("q_ind"))
					time_elapsed = int(request.POST.get("time_elapsed"))
					answers = result.answers
					answers = ast.literal_eval(answers)
					
					if q_id in answers.keys():
						answer = answers[q_id]
						if answer["opt_id"]==0 and answer["time_elapsed"]<=time_elapsed:
							answer["opt_id"]=opt_id
							answer["time_elapsed"]=time_elapsed
							context["success"]=True
							
						elif trivia.can_change_answer and answer["time_elapsed"]<=time_elapsed:
							answer["opt_id"]=opt_id
							answer["time_elapsed"]=time_elapsed
							context["success"]=True

						else:
							context["error"]="Can not change answer"
							
						answers[q_id] = answer


					else:
						answer = {"opt_id": opt_id, "time_elapsed": time_elapsed}
						answers[q_id] = answer
						context["success"]=True

					context["opt_id"]=answers[q_id]["opt_id"]
					context["q_ind"]=q_ind
					answers = str(answers)
					result.answers = answers
					result.save()
					

				else:
					context["contest_ended"]=True
					context["error"]="Trivia ended for you"
					return JsonResponse(context)

			else:
				context["error"] = "You are not in contest"
				return JsonResponse(context)
		else:
			context["error"] = "Trivia does not exist"
			return JsonResponse(context)
		

	return JsonResponse(context)
Example #38
 def load(self, tree, trg_seqs, parent_seqs, sibling_seqs, leaf_seqs, rule_seqs, word_mask, rule_mask, par_dict):
     rule_ind = 0
     word_ind = 0
     if tree.is_leaf:
         nt, word, tag = tree.name.split('__')
         word = word.lower()
         rule_seqs.append(0)
         rule_mask.append(0)
         leaf_seqs.append(1)
     else:
         nt, word, tag, rule = tree.name.split('__')
         word = word.lower()
         word_ind = self.word_dict[word] if word in self.word_dict else self.word_dict['<UNK>']
         inh = literal_eval(rule[rule.find('['):rule.find(']')+1])[0]
         rule = rule[:rule.find('[')-1]
         tag = rule.split()[inh + 1]
         rule_ind = self.rule_dict[rule] if rule in self.rule_dict else self.rule_dict['<UNK>']
         rule_seqs.append(rule_ind)
         rule_mask.append(1)
         leaf_seqs.append(0)
     '''
     par_rule_ind = 0
     if tree.parent is not None:
         _, _, _, par_rule = tree.parent.name.split('__')
         par_rule = par_rule[:par_rule.find('[')-1]
         par_rule_ind = self.rule_dict[par_rule]
     '''
     trg_seqs.append((self.nt_dict[nt], word_ind, self.nt_dict[tag], rule_ind))
     '''
     if tree.parent is not None:
         parent_seqs[0].append(par_dict[tree.parent])
         if tree.parent.parent is not None:
             parent_seqs[1].append(par_dict[tree.parent.parent])
             pos = tree.parent.parent.children.index(tree.parent)
             if pos > 0:
                 sibling_seqs[1].append(par_dict[tree.parent.parent.children[pos-1]]) 
             else:
                 sibling_seqs[1].append((0, 0, 0))
         else:
             parent_seqs[1].append((0, 0, 0))
             sibling_seqs[1].append((0, 0, 0))
         pos = tree.parent.children.index(tree)
         if pos > 0:
             sibling_seqs[0].append(par_dict[tree.parent.children[pos-1]]) 
         else:
             sibling_seqs[0].append((0, 0, 0))
     else:
         parent_seqs[0].append((0, 0, 0))
         sibling_seqs[0].append((0, 0, 0))
         parent_seqs[1].append((0, 0, 0))
         sibling_seqs[1].append((0, 0, 0))
     '''
     anc = tree.ancestors
     '''
     if len(anc) >= 3:
         parent_seqs[0].append(par_dict[anc[1]])
         parent_seqs[1].append(par_dict[anc[2]])
     elif len(anc) == 2:
         parent_seqs[0].append(par_dict[anc[1]])
         parent_seqs[1].append((0, 0, 0))
     else:
         parent_seqs[0].append((0, 0, 0))
         parent_seqs[1].append((0, 0, 0))
     '''
     parent_seqs[0].append((0, 0, 0))
     parent_seqs[1].append((0, 0, 0))
     sibling_seqs[0].append((0, 0, 0))
     sibling_seqs[1].append((0, 0, 0))
     par_dict[tree] = trg_seqs[-1]
     if tree.parent:
         ind = tree.parent.children.index(tree)
         openb = tree.parent.name.rindex('[')
         closeb = tree.parent.name.rindex(']')
         inds = literal_eval(tree.parent.name[openb:closeb+1])
         if ind in inds:
             word_mask.append(0)
         else:
             word_mask.append(1)
     else:
         word_mask.append(1)
     for ch in tree.children:
         self.load(ch, trg_seqs, parent_seqs, sibling_seqs, leaf_seqs, rule_seqs, word_mask, rule_mask, par_dict)
Example #39
import ast
import sys
file_name = sys.argv[1]
f = open(file_name , "r")
lines = f.readlines()
address_list = []
for line in lines:
    line = line.replace(";", ",")
    line = ast.literal_eval(line)
    value = line[1]
    address_list.append(value)
address_list = str(address_list).replace(",",";")
print(("Appartments with greater than 6 stories", address_list))
Example #40
    def simulate(self, n=-1):
        """
        Simulates a trader's performance. creates new the variables eq_bal as equivalent balance
        (i.e. all values are transfered into Bitcoins (XBT) in order to compare them easily -
            be careful, this depends heavily on the exchange rates!!!)
        """
        pair = "xxbtzeur"  # self.trader.price.iterkeys().next()
        i = 0
        pair_len = self.dbq.length(pair)
        if n == -1:
            n = pair_len

        start_time = self.dbq.gettimeat(pair, pair_len - n)
        end_time = dt.datetime.now()
        self.starting_balance(start_time)

        balance = self.account.balance  # This copies only the reference; changing balance will change self.account.balance
        if not self.optimize:
            s_balance = self.account.balance.copy()

        start_bal = end_bal = 0
        for cur in self.account.balance.keys():
            start_bal += self.hf.get_eq_bal(balance[cur], cur, start_time,
                                            'ZEUR')
            end_bal += self.hf.get_eq_bal(balance[cur], cur, end_time, 'ZEUR')

        for time in self.dbq.timestamps(table=pair, limit=n):
            # Sell action
            advice = self.trader.get_sell_advice(time[0])
            sold = dict()
            credit_item = dict()
            for pair in sorted(advice,
                               key=lambda key: advice[key],
                               reverse=True):
                base = self.hf.get_base(pair)
                quote = self.hf.get_quote(pair)
                # Check if sufficient funds
                # TODO: add transaction fees dependent on numbers of transaction (change 2nd last index)
                price = self.dbq.get(table=pair,
                                     column='bid_price',
                                     time=time[0])[0][0]
                amountsell = min(advice[pair], balance[base])
                amountbuy = (amountsell / price) *\
                            (1 - ast.literal_eval(self.account.asset_pairs[pair]['fees'])[0][1] / 100)
                if amountsell > 0.01:
                    if quote not in credit_item:
                        credit_item.update({quote: 0})
                    balance[base] -= amountsell
                    credit_item[quote] += amountbuy
                    sold[pair] = [amountsell, amountbuy, price]

            # buy action
            advice = self.trader.get_buy_advice(time[0])
            bought = dict()
            for pair in sorted(advice,
                               key=lambda key: advice[key],
                               reverse=True):
                base = self.hf.get_base(pair)
                quote = self.hf.get_quote(pair)
                # Check if enough money is left to buy
                price = self.dbq.get(table=pair,
                                     column='ask_price',
                                     time=time[0])[0][0]
                amountsell = min(balance[quote], (advice[pair] * price))
                amountbuy = amountsell / price
                if amountbuy > 0.01:
                    if base not in credit_item:
                        credit_item.update({base: 0})
                    balance[quote] -= amountsell
                    credit_item[base] += amountbuy *\
                                          (1 - ast.literal_eval(self.account.asset_pairs[pair]['fees'])[0][1]/100)
                    bought[pair] = [amountbuy, amountsell, price]

            if not self.optimize:
                if sold or bought:
                    print("-----\nPerformed trade ($): sell: " + str(sold) +
                          " buy: " + str(bought))
                    # Write credit items to the account balance
                    if credit_item:
                        for curr in credit_item:
                            balance[curr] += credit_item[curr]
                    rel_bal = dict()
                    for cur in balance.keys():
                        rel_bal[cur] = self.hf.get_eq_bal(
                            balance[cur], cur, time[0], 'ZEUR')
                    eq_bal = sum(rel_bal.values())
                    s_rel_bal = dict()
                    for cur in s_balance.keys():
                        s_rel_bal[cur] = self.hf.get_eq_bal(
                            s_balance[cur], cur, time[0], 'ZEUR')
                    s_eq_bal = sum(s_rel_bal.values())
                    for bal in rel_bal:
                        rel_bal[bal] = round(rel_bal[bal] / eq_bal, 2) * 100
                    print(str(time[0])+" "+str(i)+", Equivalent in "+self.reference_curr + ": " +
                          str(round(eq_bal, 2)) +
                          ",\n\t Compared to market ("+str(round(s_eq_bal, 2))+"): " +
                          str(round((eq_bal/s_eq_bal-1)*100, 2))+\
                          "%,\n\t Compared to start ("+str(round(start_bal, 2))+"): " +
                          str(round((eq_bal/start_bal-1)*100, 2))+"%." +
                          "\nRelative balances[%]: "+str(sorted(rel_bal.items(), key=lambda x: x[1], reverse=True)))

        print("Start balance: " + str(start_bal))
        print("Market adjusted end balance: " + str(end_bal))
        print("Return: " + str(eq_bal / start_bal * 100 - 100) + "%")
        print("-----------------------\nSingle Positions:")
        for bal in balance:
            print(str(bal) + ": " + str(balance[bal]))
        return eq_bal
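A minimal sketch of the fee lookup used in simulate(), assuming the account's asset_pairs 'fees' field holds a stringified schedule of [volume, percent] pairs (Kraken-style; the exact layout is an assumption):

import ast

fees_raw = "[[0, 0.26], [50000, 0.24]]"     # hypothetical fee schedule
fee_pct = ast.literal_eval(fees_raw)[0][1]  # first tier's percent -> 0.26
net_amount = 1.0 * (1 - fee_pct / 100)      # amount credited after fees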
Example #41
0
def __import_data__(chat_id, data):
    failures = []
    for notename, notedata in data.get('extra', {}).items():
        match = FILE_MATCHER.match(notedata)
        matchsticker = STICKER_MATCHER.match(notedata)
        matchbtn = BUTTON_MATCHER.match(notedata)
        matchfile = MYFILE_MATCHER.match(notedata)
        matchphoto = MYPHOTO_MATCHER.match(notedata)
        matchaudio = MYAUDIO_MATCHER.match(notedata)
        matchvoice = MYVOICE_MATCHER.match(notedata)
        matchvideo = MYVIDEO_MATCHER.match(notedata)
        matchvn = MYVIDEONOTE_MATCHER.match(notedata)

        if match:
            failures.append(notename)
            notedata = notedata[match.end():].strip()
            if notedata:
                sql.add_note_to_db(chat_id, notename[1:], notedata,
                                   sql.Types.TEXT)
        elif matchsticker:
            content = notedata[matchsticker.end():].strip()
            if content:
                sql.add_note_to_db(chat_id,
                                   notename[1:],
                                   notedata,
                                   sql.Types.STICKER,
                                   file=content)
        elif matchbtn:
            parse = notedata[matchbtn.end():].strip()
            notedata = parse.split("<###button###>")[0]
            buttons = parse.split("<###button###>")[1]
            buttons = ast.literal_eval(buttons)
            if buttons:
                sql.add_note_to_db(chat_id,
                                   notename[1:],
                                   notedata,
                                   sql.Types.BUTTON_TEXT,
                                   buttons=buttons)
        elif matchfile:
            file = notedata[matchfile.end():].strip()
            file = file.split("<###TYPESPLIT###>")
            notedata = file[1]
            content = file[0]
            if content:
                sql.add_note_to_db(chat_id,
                                   notename[1:],
                                   notedata,
                                   sql.Types.DOCUMENT,
                                   file=content)
        elif matchphoto:
            photo = notedata[matchphoto.end():].strip()
            photo = photo.split("<###TYPESPLIT###>")
            notedata = photo[1]
            content = photo[0]
            if content:
                sql.add_note_to_db(chat_id,
                                   notename[1:],
                                   notedata,
                                   sql.Types.PHOTO,
                                   file=content)
        elif matchaudio:
            audio = notedata[matchaudio.end():].strip()
            audio = audio.split("<###TYPESPLIT###>")
            notedata = audio[1]
            content = audio[0]
            if content:
                sql.add_note_to_db(chat_id,
                                   notename[1:],
                                   notedata,
                                   sql.Types.AUDIO,
                                   file=content)
        elif matchvoice:
            voice = notedata[matchvoice.end():].strip()
            voice = voice.split("<###TYPESPLIT###>")
            notedata = voice[1]
            content = voice[0]
            if content:
                sql.add_note_to_db(chat_id,
                                   notename[1:],
                                   notedata,
                                   sql.Types.VOICE,
                                   file=content)
        elif matchvideo:
            video = notedata[matchvideo.end():].strip()
            video = video.split("<###TYPESPLIT###>")
            notedata = video[1]
            content = video[0]
            if content:
                sql.add_note_to_db(chat_id,
                                   notename[1:],
                                   notedata,
                                   sql.Types.VIDEO,
                                   file=content)
        elif matchvn:
            video_note = notedata[matchvn.end():].strip()
            video_note = video_note.split("<###TYPESPLIT###>")
            notedata = video_note[1]
            content = video_note[0]
            if content:
                sql.add_note_to_db(chat_id,
                                   notename[1:],
                                   notedata,
                                   sql.Types.VIDEO_NOTE,
                                   file=content)
        else:
            sql.add_note_to_db(chat_id, notename[1:], notedata, sql.Types.TEXT)

    if failures:
        with BytesIO(str.encode("\n".join(failures))) as output:
            output.name = "failed_imports.txt"
            dispatcher.bot.send_document(
                chat_id,
                document=output,
                filename="failed_imports.txt",
                caption=tl(
                    update.effective_message,
                    "File/photo failed to import due to come "
                    "from other bots. This is a limitation of Telegram API, and could not "
                    "avoided. Sorry for the inconvenience!"))
 def get_actual_ostype(self):
     version_raw = self.retrieve_ostype()
     dist, version, extra = ast.literal_eval(version_raw)
     return dist, version, extra
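A sketch of the round trip, assuming retrieve_ostype() returns a stringified tuple such as "('Ubuntu', '20.04', 'focal')" (hypothetical value):

import ast

version_raw = "('Ubuntu', '20.04', 'focal')"  # hypothetical raw value
dist, version, extra = ast.literal_eval(version_raw)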
    def _generate_examples(self, filepaths):
        """Yields examples."""
        # TODO(cornell_movie_dialog): Yields (key, example) tuples from the dataset
        movie_char_file = os.path.join(filepaths, "movie_characters_metadata.txt")
        movie_conv_file = os.path.join(filepaths, "movie_conversations.txt")
        movie_lines_file = os.path.join(filepaths, "movie_lines.txt")
        movie_titles_file = os.path.join(filepaths, "movie_titles_metadata.txt")

        with open(movie_char_file, "rb") as f:
            movie_char_data = [x.decode("latin").split("+++$+++") for x in f.readlines()]

        with open(movie_conv_file, "rb") as f:
            movie_conv_data = [x.decode("latin").split("+++$+++") for x in f.readlines()]

        with open(movie_lines_file, "rb") as f:
            movie_lines_data = [x.decode("latin").split("+++$+++") for x in f.readlines()]

        with open(movie_titles_file, "rb") as f:
            movie_titles_data = [x.decode("latin").split("+++$+++") for x in f.readlines()]
        # looping over movie conversation file
        for id_, conv in enumerate(movie_conv_data):
            char_id_1 = conv[0]
            char_id_2 = conv[1]
            movie_id = conv[2]
            line_ids = conv[-1].replace("\n", "")
            line_ids = ast.literal_eval(line_ids.strip())
            lines_texts = []
            # searching text corresponding to each lineID in line_ids in movie lines file
            for line_id in line_ids:
                i = 0
                while i < len(movie_lines_data) and movie_lines_data[i][0].strip() != line_id:
                    i += 1
                lines_texts.append(movie_lines_data[i][-1])  # last field is the line's text; [0] would repeat the line ID
            # look for char names in movie character file
            j = 0
            while j < len(movie_char_data) and movie_char_data[j][0].strip() != char_id_1.strip():
                j += 1
            char_name_1 = movie_char_data[j][1]  # if j < len(movie_char_data) else ''
            movie_title = movie_char_data[j][3]  # if j < len(movie_char_data) else ''

            k = 0
            while k < len(movie_char_data) and movie_char_data[k][0].strip() != char_id_2.strip():
                k += 1
            char_name_2 = movie_char_data[k][1]

            # look for movie year, IMDBRating, genre, no_imdb_voting in movie titles file
            li = 0
            while li < len(movie_titles_data) and movie_titles_data[li][0].strip() != movie_id.strip():
                li += 1
            movie_year = movie_titles_data[li][2]
            imdb_rating = movie_titles_data[li][3]
            no_imdb_vote = movie_titles_data[li][4]
            genre = movie_titles_data[li][5].replace("\n", "").strip()
            movie_genres = ast.literal_eval(genre)

            yield id_, {
                "movieID": movie_id,
                "movieTitle": movie_title,
                "movieYear": movie_year,
                "movieIMDBRating": imdb_rating,
                "movieNoIMDBVotes": no_imdb_vote,
                "movieGenres": movie_genres,
                "characterID1": char_id_1,
                "characterID2": char_id_2,
                "characterName1": char_name_1,
                "characterName2": char_name_2,
                "utterance": {"text": lines_texts, "LineID": line_ids},
            }
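The conversations file stores each conversation's utterance IDs as a stringified list, which literal_eval turns back into a Python list:

import ast

raw = "['L194', 'L195', 'L196']\n"  # one conv[-1] field
line_ids = ast.literal_eval(raw.replace("\n", "").strip())
# -> ['L194', 'L195', 'L196']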
    def element_attribute_should_match(self,
                                       locator,
                                       attr_name,
                                       match_pattern,
                                       regexp=False):
        """Verify that an attribute of an element matches the expected criteria.

        The element is identified by _locator_. See `introduction` for details
        about locating elements. If more than one element matches, the first element is selected.

        The _attr_name_ is the name of the attribute within the selected element.

        The _match_pattern_ is used for the matching. If the match_pattern is
        - a boolean or a 'True'/'true'/'False'/'false' string, then a boolean match is applied
        - any other string causes a string match

        The _regexp_ defines whether the string match is done using regular expressions (i.e. BuiltIn Library's
        [http://robotframework.org/robotframework/latest/libraries/BuiltIn.html#Should%20Match%20Regexp|Should
        Match Regexp] or string pattern match (i.e. BuiltIn Library's
        [http://robotframework.org/robotframework/latest/libraries/BuiltIn.html#Should%20Match|Should
        Match])


        Examples:

        | Element Attribute Should Match | xpath = //*[contains(@text,'foo')] | text | *foobar |
        | Element Attribute Should Match | xpath = //*[contains(@text,'foo')] | text | f.*ar | regexp = True |
        | Element Attribute Should Match | xpath = //*[contains(@text,'foo')] | enabled | True |

        | 1. is a string pattern match i.e. the 'text' attribute should end with the string 'foobar'
        | 2. is a regular expression match i.e. the regexp 'f.*ar' should be within the 'text' attribute
        | 3. is a boolean match i.e. the 'enabled' attribute should be True


        _*NOTE: *_
        On Android the supported attribute names are hard-coded in the
        [https://github.com/appium/appium/blob/master/lib/devices/android/bootstrap/src/io/appium/android/bootstrap/AndroidElement.java|AndroidElement]
        Class's getBoolAttribute() and getStringAttribute() methods.
        Currently supported (appium v1.4.11):
        _contentDescription, text, className, resourceId, enabled, checkable, checked, clickable, focusable, focused, longClickable, scrollable, selected, displayed_


        _*NOTE: *_
        Some attributes can be evaluated in two different ways e.g. these evaluate the same thing:

        | Element Attribute Should Match | xpath = //*[contains(@text,'example text')] | name | txt_field_name |
        | Element Name Should Be         | xpath = //*[contains(@text,'example text')] | txt_field_name |      |

        """
        elements = self._element_find(locator, False, True)
        if len(elements) > 1:
            self._info(
                "CAUTION: '%s' matched %s elements - using the first element only"
                % (locator, len(elements)))

        attr_value = elements[0].get_attribute(attr_name)

        # ignore regexp argument if matching boolean
        if isinstance(match_pattern, bool) or match_pattern.lower(
        ) == 'true' or match_pattern.lower() == 'false':
            if isinstance(match_pattern, bool):
                match_b = match_pattern
            else:
                match_b = ast.literal_eval(match_pattern.title())

            if isinstance(attr_value, bool):
                attr_b = attr_value
            else:
                attr_b = ast.literal_eval(attr_value.title())

            self._bi.should_be_equal(match_b, attr_b)

        elif regexp:
            self._bi.should_match_regexp(
                attr_value,
                match_pattern,
                msg="Element '%s' attribute '%s' should have been '%s' "
                "but it was '%s'." %
                (locator, attr_name, match_pattern, attr_value),
                values=False)
        else:
            self._bi.should_match(
                attr_value,
                match_pattern,
                msg="Element '%s' attribute '%s' should have been '%s' "
                "but it was '%s'." %
                (locator, attr_name, match_pattern, attr_value),
                values=False)
        #if expected != elements[0].get_attribute(attr_name):
        #    raise AssertionError("Element '%s' attribute '%s' should have been '%s' "
        #                         "but it was '%s'." % (locator, attr_name, expected, element.get_attribute(attr_name)))
        self._info("Element '%s' attribute '%s' is '%s' " %
                   (locator, attr_name, match_pattern))
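The boolean branch above leans on str.title() to normalise case so that ast.literal_eval can parse the value:

import ast

ast.literal_eval("true".title())   # "True"  -> True
ast.literal_eval("FALSE".title())  # "False" -> False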
Example #45
0
 def readConfig(self, parser):
     """ Read in and check options passed by the config file.
     """
     self.jobName = str(parser.get('logistics', 'jobName'))
     self.outDir = str(parser.get('logistics', 'outDir'))
     self.acctKey = str(parser.get('logistics', 'acctKey'))
     self.queName = str(parser.get('logistics', 'optQueNameModel'))
     self.queNameAnalysis = str(
         parser.get('logistics', 'optQueNameAnalysis'))
     self.optCalStripFlag = int(parser.get('logistics',
                                           'stripCalibOutputs'))
     self.optCalStripHrs = int(parser.get('logistics', 'stripCalibHours'))
     self.nCoresMod = int(parser.get('logistics', 'nCoresModel'))
     self.nNodesMod = int(parser.get('logistics', 'nNodesModel'))
     self.nCoresR = int(parser.get('logistics', 'nCoresR'))
     self.nNodesR = int(parser.get('logistics', 'nNodesR'))
     self.nIter = int(parser.get('logistics', 'numIter'))
     self.sensFlag = int(parser.get('logistics', 'runSens'))
     self.sensTbl = str(parser.get('logistics', 'sensParmTbl'))
     self.calibFlag = int(parser.get('logistics', 'runCalib'))
     self.calibTbl = str(parser.get('logistics', 'calibParmTbl'))
     self.dailyAnalysis = int(parser.get('logistics', 'dailyStats'))
     self.coldStart = int(parser.get('logistics', 'coldStart'))
     self.optSpinFlag = int(parser.get('logistics', 'optSpinFlag'))
     self.jobRunType = int(parser.get('logistics', 'jobRunType'))
     self.analysisRunType = int(parser.get('logistics', 'analysisRunType'))
     self.objFunc = str(parser.get('logistics', 'objectiveFunction'))
     self.ddsR = str(parser.get('logistics', 'ddsR'))
     if len(self.ddsR) != 0:
         self.ddsR = float(self.ddsR)
     self.email = str(parser.get('logistics', 'email'))
     #self.slChan = str(parser.get('logistics','slackChannel'))
     #self.slToken = str(parser.get('logistics','slackToken'))
     #self.slUser = str(parser.get('logistics','slackUser'))
     # Initiate Slack object if user has specified. Throw an error message
     # if Slack is not successfully initiated.
     #if len(self.slChan) > 0:
     #    try:
     #        self.slackObj = Slacker(str(self.slToken))
     #    except:
     #        print "ERROR: Failure to initiate Slack."
     #        raise
     self.exe = str(parser.get('logistics', 'wrfExe'))
     self.genParmTbl = str(parser.get('logistics', 'genParmTbl'))
     self.mpParmTbl = str(parser.get('logistics', 'mpParmTbl'))
     self.urbParmTbl = str(parser.get('logistics', 'urbParmTbl'))
     self.vegParmTbl = str(parser.get('logistics', 'vegParmTbl'))
     self.soilParmTbl = str(parser.get('logistics', 'soilParmTbl'))
     self.bSpinDate = parser.get('logistics', 'bSpinDate')
     self.bSpinDate = datetime.datetime.strptime(self.bSpinDate, '%Y-%m-%d')
     self.eSpinDate = parser.get('logistics', 'eSpinDate')
     self.eSpinDate = datetime.datetime.strptime(self.eSpinDate, '%Y-%m-%d')
     self.bCalibDate = parser.get('logistics', 'bCalibDate')
     self.bCalibDate = datetime.datetime.strptime(self.bCalibDate,
                                                  '%Y-%m-%d')
     self.eCalibDate = parser.get('logistics', 'eCalibDate')
     self.eCalibDate = datetime.datetime.strptime(self.eCalibDate,
                                                  '%Y-%m-%d')
     self.bCalibEvalDate = parser.get('logistics', 'bCalibEvalDate')
     self.bCalibEvalDate = datetime.datetime.strptime(
         self.bCalibEvalDate, '%Y-%m-%d')
     # Calculate the beginning date for full outputs. If the optional
     # flag for stripping outputs is off, set this date to the beginning
     # of the model simulation.
     if self.optCalStripFlag == 1:
         self.bCalibFullOutputs = self.bCalibDate + datetime.timedelta(
             seconds=3600 * self.optCalStripHrs)
         # Run a check here: if the user has specified a date that is NOT
         # the beginning of the month, throw an error. When minimal outputs are activated,
         # only restart files at the beginning of the month are available. If the
         # user strips outputs to a date at any other time step,
         # the workflow will continuously initialize calibration model simulations
         # from a timestep with no restart file available.
         if self.bCalibFullOutputs.day != 1 or self.bCalibFullOutputs.hour != 0:
             print "ERROR: Please specify a stripCalibHours value that results in a date at the beginning of the month."
             raise Exception()
     else:
         self.bCalibFullOutputs = self.bCalibDate
     self.bValidDate = parser.get('logistics', 'bValidDate')
     self.bValidDate = datetime.datetime.strptime(self.bValidDate,
                                                  '%Y-%m-%d')
     self.eValidDate = parser.get('logistics', 'eValidDate')
     self.eValidDate = datetime.datetime.strptime(self.eValidDate,
                                                  '%Y-%m-%d')
     self.bValidEvalDate = parser.get('logistics', 'bValidEvalDate')
     self.bValidEvalDate = datetime.datetime.strptime(
         self.bValidEvalDate, '%Y-%m-%d')
     self.nSensSample = int(parser.get('Sensitivity', 'sensParmSample'))
     self.nSensBatch = int(parser.get('Sensitivity', 'sensBatchNum'))
     self.bSensDate = parser.get('Sensitivity', 'bSensDate')
     self.bSensDate = datetime.datetime.strptime(self.bSensDate, '%Y-%m-%d')
     self.eSensDate = parser.get('Sensitivity', 'eSensDate')
     self.eSensDate = datetime.datetime.strptime(self.eSensDate, '%Y-%m-%d')
     self.bSensEvalDate = parser.get('Sensitivity', 'bSensEvalDate')
     self.bSensEvalDate = datetime.datetime.strptime(
         self.bSensEvalDate, '%Y-%m-%d')
     self.gSQL = parser.get('gageInfo', 'gageListSQL')
     self.gList = str(parser.get('gageInfo', 'gageListFile'))
     self.dynVegOpt = int(parser.get('lsmPhysics', 'dynVegOption'))
     self.canStomOpt = int(parser.get('lsmPhysics', 'canStomResOption'))
     self.btrOpt = int(parser.get('lsmPhysics', 'btrOption'))
     self.runOffOpt = int(parser.get('lsmPhysics', 'runoffOption'))
     self.sfcDragOpt = int(parser.get('lsmPhysics', 'sfcDragOption'))
     self.frzSoilOpt = int(parser.get('lsmPhysics', 'frzSoilOption'))
     self.supCoolOpt = int(parser.get('lsmPhysics', 'supCoolOption'))
     self.radTOpt = int(parser.get('lsmPhysics', 'radTransferOption'))
     self.snAlbOpt = int(parser.get('lsmPhysics', 'snAlbOption'))
     self.pcpPartOpt = int(parser.get('lsmPhysics', 'pcpPartOption'))
     self.tbotOpt = int(parser.get('lsmPhysics', 'tbotOption'))
     self.timeSchmOpt = int(parser.get('lsmPhysics', 'tempTimeSchOption'))
     self.sfcResOpt = int(parser.get('lsmPhysics', 'sfcResOption'))
     self.glacier = int(parser.get('lsmPhysics', 'glacierOption'))
     self.soilThick = ast.literal_eval(parser.get('lsmPhysics',
                                                  'soilThick'))
     self.zLvl = float(parser.get('lsmPhysics', 'zLvl'))
     self.fType = int(parser.get('forcing', 'forceType'))
     self.fDT = int(parser.get('modelTime', 'forceDt'))
     self.lsmDt = int(parser.get('modelTime', 'lsmDt'))
     self.lsmOutDt = int(parser.get('modelTime', 'lsmOutDt'))
     self.lsmRstFreq = int(parser.get('modelTime', 'lsmRstFreq'))
     self.hydroRstFreq = int(parser.get('modelTime', 'hydroRstFreq'))
     self.hydroOutDt = int(parser.get('modelTime', 'hydroOutDt'))
     self.rstType = int(parser.get('hydroIO', 'rstType'))
     self.ioConfigOutputs = int(parser.get('hydroIO', 'ioConfigOutputs'))
     self.ioFormOutputs = int(parser.get('hydroIO', 'ioFormOutputs'))
     self.chrtoutDomain = int(parser.get('hydroIO', 'chrtoutDomain'))
     self.chanObs = int(parser.get('hydroIO', 'chanObsDomain'))
     self.chrtoutGrid = int(parser.get('hydroIO', 'chrtoutGrid'))
     self.lsmDomain = int(parser.get('hydroIO', 'lsmDomain'))
     self.rtoutDomain = int(parser.get('hydroIO', 'rtoutDomain'))
     self.gwOut = int(parser.get('hydroIO', 'gwOut'))
     self.lakeOut = int(parser.get('hydroIO', 'lakeOut'))
     self.frxstPts = int(parser.get('hydroIO', 'frxstOut'))
     self.resetHydro = int(parser.get('hydroIO', 'resetHydroAcc'))
     self.strOrder = int(parser.get('hydroIO', 'streamOrderOut'))
     self.dtChRt = int(parser.get('hydroPhysics', 'dtChSec'))
     self.dtTerRt = int(parser.get('hydroPhysics', 'dtTerSec'))
     self.subRtFlag = int(parser.get('hydroPhysics', 'subRouting'))
     self.ovrRtFlag = int(parser.get('hydroPhysics', 'ovrRouting'))
     self.rtOpt = int(parser.get('hydroPhysics', 'rtOpt'))
     self.chnRtFlag = int(parser.get('hydroPhysics', 'channelRouting'))
     self.chnRtOpt = int(parser.get('hydroPhysics', 'chanRtOpt'))
     self.udmpOpt = int(parser.get('hydroPhysics', 'udmpOpt'))
     self.gwBaseFlag = int(parser.get('hydroPhysics', 'gwBaseSw'))
     self.gwRst = int(parser.get('hydroPhysics', 'gwRestart'))
     self.cmpdChan = int(parser.get('hydroPhysics', 'compoundChannel'))
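Because soilThick is read with ast.literal_eval, the config file is expected to hold a Python list literal for that option, e.g. (hypothetical values):

import ast

# [lsmPhysics]
# soilThick = [0.1, 0.3, 0.6, 1.0]
soil_thick = ast.literal_eval("[0.1, 0.3, 0.6, 1.0]")  # -> [0.1, 0.3, 0.6, 1.0]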
Example #46
0
        cur = con.cursor()
        cur.execute(
            "SELECT rtu_data_date, rtu_point_record from sldcsch.rtu_point_data where rtu_data_date='"
            + str(yesterday) + "' and rtu_point_id ='13008'")
        yrows = cur.fetchall()
        print(len(yrows))
        dfcyesterday = pd.DataFrame(columns=['date', 'rtudata'])
        l = 1
        for row in yrows:
            dfcyesterday.loc[l, 'date'] = str(row[0])
            dfcyesterday.loc[l, 'rtudata'] = str(row[1])
            l = l + 1
        mytime = datetime.now().time()
        todaystr = t1.today().strftime('%Y-%m-%d')
        anlist2 = pd.Series.tolist(dfcyesterday['rtudata'])
        anlist2 = ast.literal_eval(anlist2[0])
        timelist2 = []
        anvalue2 = []
        for x in anlist2:
            anvalue2.append(x['recordValue'])
            string = x['recordTime']
            string = todaystr + " " + string
            timelist2.append(string)
        yesdf = pd.DataFrame()
        yesdf['timelist'] = timelist2
        yesdf['value'] = anvalue2
        yesdf = yesdf.set_index('timelist')

        x_input = yesdf[-60:].values
        dataset_total = x_input.copy()
        n_future_preds = 30
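A sketch of the stored point record, assuming rtu_point_record is a stringified list of dicts with the recordTime/recordValue keys used in the loop above (example values are hypothetical):

import ast

raw = "[{'recordTime': '00:15', 'recordValue': 101.5}]"
for x in ast.literal_eval(raw):
    print(x['recordTime'], x['recordValue'])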
Example #47
0
# Copyright (C) British Crown (Met Office) & Contributors.
# This file is part of Rose, a framework for meteorological suites.
#
# Rose is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Rose is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Rose. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
"""Test the ability of the query parser to generate logical expressions."""

import ast
import sys
from metomi.rosie.db import DAO
from metomi.rosie.db_create import RosieDatabaseInitiator
from tempfile import NamedTemporaryFile

if __name__ == "__main__":
    f = NamedTemporaryFile()
    db_url = "sqlite:////" + f.name
    RosieDatabaseInitiator().create(db_url)
    dao = DAO(db_url)
    print(str(dao.parse_filters_to_expr(ast.literal_eval(sys.argv[1]))))
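Hypothetical invocation: the single shell argument is a Python literal describing the filters (the exact structure parse_filters_to_expr expects is an assumption):

import ast

argv1 = "[('and', 'idx', 'eq', 'ro123')]"  # hypothetical filter literal
filters = ast.literal_eval(argv1)          # -> [('and', 'idx', 'eq', 'ro123')]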
Example #48
0
def checkConfig(parser):
    """ Function to check all options in the config file.
    """
    # Go through and check everything put into the config file.
    check = str(parser.get('logistics', 'outDir'))
    if len(check) == 0:
        print "ERROR: Zero length output directory provided."
        raise Exception()
    if not os.path.isdir(check):
        print "ERROR: Directory: " + check + " not found."
        raise Exception()

    check = str(parser.get('logistics', 'jobName'))
    if len(check) == 0:
        print "ERROR: Zero length job name provided."
        raise Exception()

    check = str(parser.get('logistics', 'acctKey'))
    if len(check) == 0:
        print "WARNING: Zero length account key passed to program."

    # We won't check the optional que name as it's optional. Even if some
    # run with a job submission method, they may not need to run with a que.

    # Either email or Slack must be chosen. If Slack is chosen, user
    # must provide both channel and API token.
    # FOR NOW WILL RELAX EMAIL CONSTRAINT
    check1 = str(parser.get('logistics', 'email'))
    if len(check1) == 0:
        print "WARNING: Zero length email passed. Proceed with caution...."
    #check2 = str(parser.get('logistics','slackChannel'))
    #check3 = str(parser.get('logistics','slackToken'))
    #check4 = str(parser.get('logistics','slackUser'))
    #if len(check1) > 0 and len(check2) > 0:
    #    print "ERROR: You must choose either email or Slack for error reporting."
    #    raise Exception()
    #if len(check1) == 0 and len(check2) == 0:
    #    print "ERROR: You must specify an error reporting method."
    #    raise Exception()
    #if len(check2) > 0 and len(check3) == 0:
    #    print "ERROR: You must enter a Slack token."
    #    raise Exception()
    #if len(check2) > 0 and len(check4) == 0:
    #    print "ERROR: You must enter a Slack user name."
    #    raise Exception()

    check = int(parser.get('logistics', 'nCoresModel'))
    if not check:
        print "ERROR: Number of model cores to use not specified."
        raise Exception()
    if check <= 0:
        print "ERROR: Invalid number of model cores to use."
        raise Exception()
    check = int(parser.get('logistics', 'nNodesModel'))
    if not check:
        print "ERROR: Number of model nodes to use not specified."
        raise Exception()
    if check <= 0:
        print "ERROR: Invalid number of model nodes to use."
        raise Exception()

    # Check calibration/sensitivity activation flags.
    check = int(parser.get('logistics', 'runSens'))
    if check < 0 or check > 1:
        print "ERROR: Invalid runSens flag specified."
        raise Exception()
    check = int(parser.get('logistics', 'runCalib'))
    if check < 0 or check > 1:
        print "ERROR: Invalid runCalib flag specified."
        raise Exception()

    # Check to make sure a valid option was passed for running model/R code
    check = int(parser.get('logistics', 'jobRunType'))
    if check < 1 or check > 6:
        print "ERROR: Invalid jobRunType specified."
        raise Exception()

    check = int(parser.get('logistics', 'analysisRunType'))
    if check < 1 or check > 6:
        print "ERROR: Invalid analysisRunType specified."
        raise Exception()

    check = int(parser.get('logistics', 'nCoresR'))
    if not check:
        print "ERROR: Number of R Cores to use not specified."
        raise Exception()
    check = int(parser.get('logistics', 'nNodesR'))
    if not check:
        print "ERROR: Number of R Nodes to use not specified."
        raise Exception()
    if check <= 0:
        print "ERROR: Invalid number of R Nodes to use."
        raise Exception()

    check = int(parser.get('logistics', 'dailyStats'))
    if check < 0 or check > 1:
        print "ERROR: Invalid dailyStats value specified."
        raise Exception()

    check = int(parser.get('logistics', 'coldStart'))
    if check < 0 or check > 1:
        print "ERROR: Invalid coldStart value specified."
        raise Exception()

    check = int(parser.get('logistics', 'optSpinFlag'))
    if check < 0 or check > 1:
        print "ERROR: Invalid optSpinFlag value specified."
        raise Exception()

    check1 = int(parser.get('logistics', 'coldStart'))
    check2 = int(parser.get('logistics', 'optSpinFlag'))
    if check1 == 1 and check2 == 1:
        print "ERROR: Cannot run cold start calibrations with optional spinup files."
        raise Exception()

    # Check to make sure calibration method is DDS
    check = str(parser.get('logistics', 'calibMethod'))
    if check != "DDS":
        print "ERROR: Invalid calibration method passed to program."
        raise Exception()

    # Check optional calibration output strip options.
    check1 = int(parser.get('logistics', 'stripCalibOutputs'))
    if check1 < 0 or check1 > 1:
        print "ERROR: Invalid stripCalibOutputs option passed to program."
        raise Exception()
    check2 = int(parser.get('logistics', 'stripCalibHours'))
    if check1 == 1:
        if check2 < 0:
            print "ERROR: Invalid stripCalibHours passed to program."
            raise Exception()

    check = str(parser.get('logistics', 'objectiveFunction'))
    if len(check) == 0:
        print "ERROR: Zero length calibration objective function provided."
        raise Exception()
    # For now, restrict the user to a set of pre-defined objective functions.
    if check != "Rmse" and check != "Nse" and check != "NseLog" and check != "NseWt" and check != "Kge" and check != "Msof" and check != "hyperResMultiObj":
        print "ERROR: Only acceptable objectiveFunction values are: Rmse, Nse, NseLog, NseWt, Kge, and hyperResMultiObj"
        raise Exception()

    check = int(parser.get('logistics', 'numIter'))
    if not check:
        print "ERROR: Number of calibration iterations not specified."
        raise Exception()
    if check <= 0:
        print "ERROR: Invalid number of calibration iterations specified."
        raise Exception()

    check = str(parser.get('logistics', 'wrfExe'))
    if len(check) == 0:
        print "ERROR: Zero length executable provided."
        raise Exception()
    if not os.path.isfile(check):
        print "ERROR: File: " + check + " not found."
        raise Exception()

    # Parameter tables
    check = str(parser.get('logistics', 'genParmTbl'))
    if len(check) == 0:
        print "ERROR: Zero length general parameter table provided."
        raise Exception()
    if not os.path.isfile(check):
        print "ERROR: File: " + check + " not found."
        raise Exception()

    check = str(parser.get('logistics', 'mpParmTbl'))
    if len(check) == 0:
        print "ERROR: Zero length MP parameter table provided."
        raise Exception()
    if not os.path.isfile(check):
        print "ERROR: File: " + check + " not found."
        raise Exception()

    check = str(parser.get('logistics', 'urbParmTbl'))
    if len(check) == 0:
        print "ERROR: Zero length urban parameter table provided."
        raise Exception()
    if not os.path.isfile(check):
        print "ERROR: File: " + check + " not found."
        raise Exception()

    check = str(parser.get('logistics', 'vegParmTbl'))
    if len(check) == 0:
        print "ERROR: Zero length vegetation parameter table provided."
        raise Exception()
    if not os.path.isfile(check):
        print "ERROR: File: " + check + " not found."
        raise Exception()

    check = str(parser.get('logistics', 'soilParmTbl'))
    if len(check) == 0:
        print "ERROR: Zero length soil parameter table provided."
        raise Exception()
    if not os.path.isfile(check):
        print "ERROR: File: " + check + " not found."
        raise Exception()

    # Date information
    bDate = parser.get('logistics', 'bSpinDate')
    eDate = parser.get('logistics', 'eSpinDate')
    bDate = datetime.datetime.strptime(str(bDate), '%Y-%m-%d')
    eDate = datetime.datetime.strptime(str(eDate), '%Y-%m-%d')
    if bDate >= eDate:
        print "ERROR: Must specify ending spinup date greater than beginning spinup date."
        raise Exception()

    bDate = parser.get('logistics', 'bCalibDate')
    eDate = parser.get('logistics', 'eCalibDate')
    bEDate = parser.get('logistics', 'bCalibEvalDate')
    bDate = datetime.datetime.strptime(str(bDate), '%Y-%m-%d')
    eDate = datetime.datetime.strptime(str(eDate), '%Y-%m-%d')
    bEDate = datetime.datetime.strptime(str(bEDate), '%Y-%m-%d')
    if bDate >= eDate:
        print "ERROR: Must specify ending calibration date greater than beginning spinup date."
        raise Exception()
    if bEDate >= eDate:
        print "ERROR: Must specify the beginning date for calibration evaluation date " + \
              " that is before the ending date for calibration simulations."
        raise Exception()

    bDate = parser.get('logistics', 'bValidDate')
    eDate = parser.get('logistics', 'eValidDate')
    bEDate = parser.get('logistics', 'bValidEvalDate')
    bDate = datetime.datetime.strptime(str(bDate), '%Y-%m-%d')
    eDate = datetime.datetime.strptime(str(eDate), '%Y-%m-%d')
    bEDate = datetime.datetime.strptime(str(bEDate), '%Y-%m-%d')
    if bDate >= eDate:
        print "ERROR: Must specify ending validation date greater than beginning validation date."
        raise Exception()
    if bEDate >= eDate:
        print "ERROR: Must specify the beginning date for validation evaluation date " + \
              " that is before the ending date for validation simulations."
        raise Exception()

    check = int(parser.get('logistics', 'runSens'))
    # Only check these options if sensitivity analysis has been turned on.
    if check == 1:
        check1 = int(parser.get('Sensitivity', 'sensParmSample'))
        if check1 <= 0:
            print "ERROR: Please choose numSensIter greater than 0."
            raise Exception()
        check2 = int(parser.get('Sensitivity', 'sensBatchNum'))
        if check2 <= 0:
            print "ERROR: Please choose sensBatchNum greater than 0."
            raise Exception()
        bDate = parser.get('Sensitivity', 'bSensDate')
        eDate = parser.get('Sensitivity', 'eSensDate')
        bEDate = parser.get('Sensitivity', 'bSensEvalDate')
        bDate = datetime.datetime.strptime(str(bDate), '%Y-%m-%d')
        eDate = datetime.datetime.strptime(str(eDate), '%Y-%m-%d')
        bEDate = datetime.datetime.strptime(str(bEDate), '%Y-%m-%d')
        if bDate >= eDate:
            print "ERROR: Must specify ending sensitivity date greater than beginning sensitivity date."
            raise Exception()
        if bEDate >= eDate:
            print "ERROR: Must specify the beginning date for sensitivity evaluation date " + \
                  " that is before the ending date for validation simulations."
            raise Exception()

    # Check gauge information
    check1 = str(parser.get('gageInfo', 'gageListFile'))
    check2 = str(parser.get('gageInfo', 'gageListSQL'))
    if len(check1) == 0 and len(check2) == 0:
        print "ERROR: Zero length gage list file and SQL command passed to program."
        raise Exception()
    if len(check1) > 0 and len(check2) > 0:
        print "ERROR: Cannot have both gage list and SQL command."
        raise Exception()
    if len(check1) > 0:
        if not os.path.isfile(check1):
            print "ERROR: File: " + check2 + " not found."
            raise Exception()

    # Check LSM physics options
    check = int(parser.get('lsmPhysics', 'dynVegOption'))
    if check < 0 or check > 4:
        print "ERROR: Invalid dynamic vegetation option chosen."
        raise Exception()

    check = int(parser.get('lsmPhysics', 'canStomResOption'))
    if check < 0 or check > 4:
        print "ERROR: Invalid canopy stomatal resistance option chosen."
        raise Exception()

    check = int(parser.get('lsmPhysics', 'btrOption'))
    if check < 0 or check > 4:
        print "ERROR: Invalid BTR option chosen."
        raise Exception()

    check = int(parser.get('lsmPhysics', 'runoffOption'))
    if check < 0 or check > 4:
        print "ERROR: Invalid LSM runoff option chosen."
        raise Exception()

    check = int(parser.get('lsmPhysics', 'sfcDragOption'))
    if check < 0 or check > 4:
        print "ERROR: Invalid surface drag coefficient option chosen."
        raise Exception()

    check = int(parser.get('lsmPhysics', 'frzSoilOption'))
    if check < 0 or check > 4:
        print "ERROR: Invalid frozen soil option chosen."
        raise Exception()

    check = int(parser.get('lsmPhysics', 'supCoolOption'))
    if check < 0 or check > 4:
        print "ERROR: Invalid supercooled water option chosen."
        raise Exception()

    check = int(parser.get('lsmPhysics', 'radTransferOption'))
    if check < 0 or check > 4:
        print "ERROR: Invalid radiative transfer option chosen."
        raise Exception()

    check = int(parser.get('lsmPhysics', 'snAlbOption'))
    if check < 0 or check > 4:
        print "ERROR: Invalid snow albedo option chosen."
        raise Exception()

    check = int(parser.get('lsmPhysics', 'pcpPartOption'))
    if check < 0 or check > 4:
        print "ERROR: Invalid precip partitioning option chosen."
        raise Exception()

    check = int(parser.get('lsmPhysics', 'tbotOption'))
    if check < 0 or check > 4:
        print "ERROR: Invalid TBOT option chosen."
        raise Exception()

    check = int(parser.get('lsmPhysics', 'tempTimeSchOption'))
    if check < 0 or check > 4:
        print "ERROR: Invalid temperature time scheme option chosen."
        raise Exception()

    check = int(parser.get('lsmPhysics', 'sfcResOption'))
    if check < 0 or check > 4:
        print "ERROR: Invalid surface resistence option chosen."
        raise Exception()

    check = int(parser.get('lsmPhysics', 'glacierOption'))
    if check < 0 or check > 4:
        print "ERROR: Invalid glacier option chosen."
        raise Exception()

    # Check soil moisture thickness values
    check = ast.literal_eval(parser.get('lsmPhysics', 'soilThick'))
    if len(check) != 4:
        print "ERROR: Must specify four soil layer thicknesses."
        raise Exception()

    # Check z-level. Check for zero length first: float('') would raise
    # before the length test could ever run.
    check = parser.get('lsmPhysics', 'zLvl')
    if len(check) == 0:
        print "ERROR: Zero length zLvl passed to program."
        raise Exception()
    if float(check) < 0:
        print "ERROR: zLvl must be greater than or equal to 0.0 meters."
        raise Exception()

    # Check forcing options
    check = parser.get('forcing', 'forceType')
    if len(check) == 0:
        print "ERROR: Zero length forceType value passed to program."
        raise Exception()
    if int(check) < 1 or int(check) > 6:
        print "ERROR: Invalid forceType value passed to program."
        raise Exception()

    # Make sure output frequencies aren't < 0
    check = int(parser.get('modelTime', 'forceDt'))
    if check < 0:
        print "ERROR: Invalid forcing DT passed to program."
        raise Exception()

    check = int(parser.get('modelTime', 'lsmDt'))
    if check < 0:
        print "ERROR: Invalid LSM DT passed to program."
        raise Exception()

    check = int(parser.get('modelTime', 'lsmOutDt'))
    if check < 0:
        print "ERROR: Invalid LSM Output DT passed to program."
        raise Exception()

    check = int(parser.get('modelTime', 'lsmRstFreq'))
    if check < 0:
        if check != -9999:
            print "ERROR: Invalid LSM restart frequency passed to program."
            raise Exception()

    check = int(parser.get('modelTime', 'hydroRstFreq'))
    if check < 0:
        if check != -99999:
            print "ERROR: Invalid Hydro restart frequency passed to program."
            raise Exception()

    check = int(parser.get('modelTime', 'hydroOutDt'))
    if check < 0:
        print "ERROR: Invalid Hydro output DT passed to program."
        raise Exception()

    # Check Hydro IO options
    check = parser.get('hydroIO', 'rstType')
    if len(check) == 0:
        print "ERROR: Zero length rstType passed to program."
        raise Exception()
    if int(check) < 0 or int(check) > 1:
        print "ERROR: Invalid rstType passed to program."
        raise Exception()

    check = parser.get('hydroIO', 'ioConfigOutputs')
    if len(check) == 0:
        print "ERROR: Zero length ioConfigOutputs passed to program."
        raise Exception()
    if int(check) < 0 or int(check) > 6:
        print "ERROR: Invalid ioConfigOutputs passed to program."
        raise Exception()

    check = parser.get('hydroIO', 'ioFormOutputs')
    if len(check) == 0:
        print "ERROR: Zero length ioFormOutputs passed to program."
        raise Exception()
    if int(check) < 0 or int(check) > 4:
        print "ERROR: Invalid ioFormOutputs passed to program."
        raise Exception()

    check = int(parser.get('hydroIO', 'chrtoutDomain'))
    if check < 0 or check > 1:
        print "ERROR: Invalid CHRTOUT_DOMAIN option passed to program."
        raise Exception()

    check = int(parser.get('hydroIO', 'chanObsDomain'))
    if check < 0 or check > 1:
        print "ERROR: Invalid CHANOBS_DOMAIN optino passed to program."
        raise Exception()

    check = int(parser.get('hydroIO', 'chrtoutGrid'))
    if check < 0 or check > 1:
        print "ERROR: Invalid CHRTOUT_GRID option passed to program."
        raise Exception()

    check = int(parser.get('hydroIO', 'lsmDomain'))
    if check < 0 or check > 1:
        print "ERROR: Invalid LSMOUT_DOMAIN option passed to program."
        raise Exception()

    check = int(parser.get('hydroIO', 'rtoutDomain'))
    if check < 0 or check > 1:
        print "ERROR: Invalid RTOUT_DOMAIN option passed to program."
        raise Exception()

    check = int(parser.get('hydroIO', 'gwOut'))
    if check < 0 or check > 1:
        print "ERROR: Invalid GW_OUT option passed to program."
        raise Exception()

    check = int(parser.get('hydroIO', 'lakeOut'))
    if check < 0 or check > 1:
        print "ERROR: Invalid LAKE_OUT option passed to program."
        raise Exception()

    check = int(parser.get('hydroIO', 'frxstOut'))
    if check < 0 or check > 1:
        print "ERROR: Invalid frxstOut option passed to program."
        raise Exception()

    check = int(parser.get('hydroIO', 'resetHydroAcc'))
    if check < 0 or check > 1:
        print "ERROR: Invalid RSTRT_SWC option passed to program."
        raise Exception()

    check = int(parser.get('hydroIO', 'streamOrderOut'))
    if check < 0 or check > 4:
        print "ERROR: Invalid stream order output option passed to program."
        raise Exception()

    # Check hydro physics options
    check = int(parser.get('hydroPhysics', 'dtChSec'))
    if check < 0:
        print "ERROR: Invalid DTRT_CH option passed to program."
        raise Exception()

    check = int(parser.get('hydroPhysics', 'dtTerSec'))
    if check < 0:
        print "ERROR: Invalid DTRT_TER option passed to program."
        raise Exception()

    check = int(parser.get('hydroPhysics', 'subRouting'))
    if check < 0 or check > 1:
        print "ERROR: Invalid sub-surface routing switch passed to program."
        raise Exception()

    check = int(parser.get('hydroPhysics', 'ovrRouting'))
    if check < 0 or check > 1:
        print "ERROR: Invalid overland routing switch passed to program."
        raise Exception()

    check = int(parser.get('hydroPhysics', 'channelRouting'))
    if check < 0 or check > 1:
        print "ERROR: Invalid channel routing switch passed to program."
        raise Exception()

    check = int(parser.get('hydroPhysics', 'rtOpt'))
    if check < 0 or check > 2:
        print "ERROR: Invalid overland/subsurface routing option passed to program."
        raise Exception()

    check = int(parser.get('hydroPhysics', 'chanRtOpt'))
    if check < 0 or check > 3:
        print "ERROR: Invalid channel routing option passed to program."
        raise Exception()

    check = int(parser.get('hydroPhysics', 'udmpOpt'))
    if check < 0 or check > 1:
        print "ERROR: Invalid user-defined mapping option passed to program."
        raise Exception()

    check = int(parser.get('hydroPhysics', 'gwBaseSw'))
    if check < 0 or check > 1:
        print "ERROR: Invalid groundwater bucket switch passed to program."
        raise Exception()

    check = int(parser.get('hydroPhysics', 'gwRestart'))
    if check < 0 or check > 1:
        print "ERROR: Invalid ground water restart switch passed to program."
        raise Exception()

    check = int(parser.get('hydroPhysics', 'compoundChannel'))
    if check < 0 or check > 1:
        print "ERROR: Invalid compoundChannel switch passed to program."
        raise Exception()

    # Ensure Muskingum-Cunge routing has been chosen if compound channel is activated.
    check1 = int(parser.get('hydroPhysics', 'compoundChannel'))
    check2 = int(parser.get('hydroPhysics', 'chanRtOpt'))
    if check1 == 1 and check2 != 2:
        print "ERROR: Compound channel can only be used with Muskingum Cunge Reach channel routing."
        raise Exception()
Example #49
0
class ImproperlyConfigured(Exception):
    ...


def value(variable):
    return os.environ.get(variable)


def required_value(variable):
    val = os.environ.get(variable)
    if val is None:
        raise ImproperlyConfigured('Required environment variables could not be found.')
    return val


v, rv = value, required_value

CONF = AttrDict.from_data(
    {
        'DEBUG': literal_eval(rv('DEBUG')),  # Maybe use a config library here?
        'SECRETS': {
            'BOT_TOKEN': rv('SECRETS.BOT_TOKEN'),
        },
        'COGS': {
            'CODE_SHARING': {
                'HASTEBIN_SERVER': rv('COGS.CODE_SHARING.HASTEBIN_SERVER')
            }
        }
    }
)
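DEBUG arrives from the environment as a string; literal_eval turns "True"/"False" into real booleans (example value is hypothetical):

from ast import literal_eval

# export DEBUG=True
debug = literal_eval("True")  # -> True, a real bool rather than a string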
Example #50
0
import ast
import logging
import os

import urllib3
from discord.ext import commands
from dotenv import load_dotenv

logging.basicConfig(
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler("ClassScraper.log"),
        logging.StreamHandler(),
    ],
    level=logging.INFO)
logging.getLogger("discord").setLevel(logging.WARNING)
LOGGER = logging.getLogger(__name__)
LOGGER.info("Starting bot")

load_dotenv("config.env")

BOT_PREFIX = os.environ.get("BOT_PREFIX")
BOT_TOKEN = os.environ.get("BOT_TOKEN")
DB_URI = os.environ.get("DATABASE_URL")
KEY = os.environ.get("KEY")
CUSTOM_STATUS = os.environ.get("CUSTOM_STATUS") or None
DEF_GUILD_ID = int(os.environ.get("DEF_GUILD_ID") or 0) or None  # avoid int(None) when unset
DAILY_TASK = ast.literal_eval(os.environ.get("DAILY_TASK") or "False")
DAILY_TASK_TIME = int(os.environ.get("DAILY_TASK_TIME") or 0)  # UTC
SCHEDULE_CHANNEL = int(os.environ.get("SCHEDULE_CHANNEL") or 0)
TASK_MSG_PLACEHOLDER = int(os.environ.get("TASK_MSG_PLACEHOLDER") or 0)

# disabling warning when getting logs
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
bot = commands.Bot(command_prefix=BOT_PREFIX)  # pylint: disable = invalid-name
bot.remove_command("help")  # removing the default help
Example #51
0
File: VD.py Project: CCI-MOC/ABMI
def getVulnerability(pName, pVersion, osName, osVersion, arch):
    vulnerableP = 0
    print_list = []

    with open("op.json") as json_data:
        usn_json = json.load(json_data)
        # for os_names in usn_json:
        if osName in ["rhel", "centos"]:
            os_names = "RHSA"
        elif osName == "ubuntu":
            os_names = "usn"
        else:
            os_names = "deb"

        dataset = usn_json[os_names]
        for id_val in dataset:
            notice_obj = dataset[id_val]

            if "fixes" in notice_obj.keys():
                fixes = notice_obj["fixes"]

                for fix in fixes:
                    fix_os = fix["os"]
                    fix_packages = fix["packages"]

                    if osName == fix_os["name"]:

                        for fix_package in fix_packages:
                            # print("usn package version = ",fix_package["release"])
                            # print("frame package version = ",pVersion)
                            fix_dict = {}
                            if pName == fix_package[
                                    "name"] and pVersion == fix_package[
                                        "release"]:

                                if "id" in notice_obj.keys():
                                    # print("id = ",notice_obj["id"])
                                    fix_dict["id"] = notice_obj["id"]

                                # print("package name = ", pName)
                                # print("package Version = ", pVersion)
                                # print("OS name = ", osName)
                                fix_dict["pName"] = pName
                                fix_dict["pVersion"] = pVersion
                                fix_dict["osName"] = osName
                                fix_dict["summary"] = notice_obj["summary"]

                                if "severity" in notice_obj.keys():
                                    fix_dict["severity"] = notice_obj[
                                        "severity"]

                                if "category" in notice_obj.keys():
                                    fix_dict["category"] = notice_obj[
                                        "category"]

                                print_list.append(fix_dict)

    final_set = [
        ast.literal_eval(el1) for el1 in set([str(el2) for el2 in print_list])
    ]
    return final_set
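The final step deduplicates the dicts (which are unhashable) by round-tripping them through str and back through ast.literal_eval:

import ast

print_list = [{'id': 1}, {'id': 1}, {'id': 2}]
final_set = [
    ast.literal_eval(el1) for el1 in set([str(el2) for el2 in print_list])
]
# -> two unique dicts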
Example #52
0
# Advent of Code: Day 25

import networkx as nx
from ast import literal_eval

def manhattan(a, b):
    # Manhattan distance between two 4-D points.
    return sum(abs(x2 - x1) for x1, x2 in zip(a, b))

# Read input
with open('entradaDia25') as f:
    inp = [l.rstrip() for l in f.readlines()]

grafo = nx.Graph()
for i in range(len(inp)):
    c = literal_eval("(" + inp[i] + ")")
    grafo.add_node(c)
    for j in range(i + 1, len(inp)):
        c2 = literal_eval("(" + inp[j] + ")")
        if manhattan(c, c2) <= 3:
            grafo.add_edge(c, c2)

print("Solution:", nx.number_connected_components(grafo))
    def __init__(self, json_path, embedding_size=512, balance=False, filter_years=None, print_samples=False,
                trunc=0, outpfx='', metadata_tag='', debug=False):
        """ """
        PUNCT = ['.','!','?']

        if filter_years is not None and filter_years != 0:
            if not isinstance(filter_years, list):
                filter_years = [filter_years]
            for i in range(len(filter_years)):
                filter_years[i] = str(filter_years[i])

        year_data = defaultdict(int)

        #collect sample reviews
        if print_samples:
            buckets = [''] * 10
            text_capture = {}
            for i in range(RNG_STARS):
                text_capture[i] = buckets[:]

        #optionally, prepare for sampling with class (review stars) balancing
        RECORD_BATCH_SIZE = 10000
        record_goal = RECORD_BATCH_SIZE
        record_counters = np.zeros(RNG_STARS, dtype=int)

        #record components
        labels = []
        reviews = []
        sentence_breaks = []

        max_sentence_words = 0
        max_document_sentences = 0
        max_document_words = 0

        #process json one line at a time
        file_size = os.path.getsize(json_path)

        english_vocab = set('i we is the for at to are this too very one and but she her he it'.split())

        french_vocab = set('je une mais votre'.split())
        french_count = 0
        german_vocab = set('ich auch wir nein'.split())
        german_count = 0

        with open(json_path,'r') as f:
            lineno = 0
            curr_pos = 0
            last_pct_cpt = 0
            nbr_retained = 0

            for line in f:
                lineno += 1
                if lineno % 5000 == 0:
                    pct_cpt = int(100 * (curr_pos / file_size))
                    if pct_cpt != last_pct_cpt:
                        last_pct_cpt = pct_cpt
                        sys.stdout.write(f"processing review {lineno} - {pct_cpt}% complete - reviews retained: {nbr_retained}  \r")
                        sys.stdout.flush()

                        # short runs for end-to-end debug
                        '''
                        if debug:
                            break
                        '''
                dic = ast.literal_eval(line)
                curr_pos += len(line)

                #keep only records whose year is in "filter_years", a list (to reduce dataset size)
                year = dic['date'][:4]
                year_data[year] += 1
                if filter_years:
                    if year not in filter_years:
                        continue

                #ensure review stars (the label) is in expected range
                stars = int(dic['stars']) - MIN_STARS
                if stars < 0 or stars >= RNG_STARS:  # valid adjusted labels are 0..RNG_STARS-1
                    continue

                #balance samples by label/stars
                if balance:
                    old_record_goal = record_goal
                    record_goal, bypass_record = self.balance(stars, record_goal, RECORD_BATCH_SIZE, record_counters)

                    if old_record_goal != record_goal:
                        s1 = record_counters[0]
                        s2 = record_counters[1]
                        s3 = record_counters[2]
                        s4 = record_counters[3]
                        s5 = record_counters[4]
                        print(f"line: {lineno} stars 1: {s1} 2: {s3} 3: {s3} 4: {s4} 5: {s5}                                       \r")

                    if bypass_record:
                        continue

                #sanitize text
                raw_text = dic['text']
                text = raw_text.lower()
                text = re.sub(" vs\. ", " vs ", text)
                text = re.sub("dr\. ", "dr ", text)
                text = re.sub("mr\. ", "mr ", text)
                text = re.sub("mrs\. ", "mrs ", text)
                text = re.sub(" ms\. ", " ms ", text)
                text = re.sub(" inc\. ", " inc ", text)
                text = re.sub(" llc\. ", " llc ", text)
                text = re.sub(" ltd\. ", " ltd ", text)
                text = re.sub("approx\. ", " approx ", text)
                text = re.sub("appt\. ", " appt ", text)
                text = re.sub(" apt\. ", " apt ", text)
                text = re.sub("i\.e\.", " ie ", text)
                text = re.sub("e\.g\.", " ie ", text)          # for example
                text = re.sub(" p\.s\.", "", text)
                text = re.sub(" p\.s", "", text)
                text = re.sub(" a\.m\.", " AM", text)
                text = re.sub(" p\.m\.", " PM", text)
                text = re.sub("\'re ", " are ", text)           # we're, you're, they're
                text = re.sub("(s)", "s", text)
                text = re.sub("\'", '', text)                   # \' char escape required - this wasnt getting done Nov 15, 2019
                text = re.sub('-', '', text)                    # e-mail, etc
                text = re.sub('`', '', text)                    # joe`s
                text = re.sub("\.{2,}", '.', text)
                text = re.sub('[^\w_|\.|\?|!]+', ' ', text)
                text = re.sub('\.', ' . ', text)
                text = re.sub('\?', ' ? ', text)
                text = re.sub('!', ' ! ', text)
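                # e.g. "Dr. Smith's e-mail is great!" -> "dr smiths email is great !"
                # (illustrative input, not taken from the dataset)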

                #tokenize, drop empty reviews
                text = text.split()
                text_units = len(text)
                if text_units == 0:
                    continue

                text_set = set(text)
                """
                if english_vocab.isdisjoint(text_set):
                    print(f"\nTHIS DOES NOT APPEAR TO BE ENGLISH\n {text}\n")
                    continue
                """    
                if not german_vocab.isdisjoint(text_set):
                    german_count += 1
                    #print(f"discarding german sentence: {german_count} \n{text}")
                    continue

                if not french_vocab.isdisjoint(text_set):
                    french_count += 1
                    #print(f"discarding french sentence: {french_count} \n{text}")
                    continue

                #capture representative examples 
                if print_samples:
                    bucket = int(text_units / 100)
                    if bucket < len(buckets):            # 100 token buckets
                        saver = text_capture[stars]
                        if saver[bucket] == '':
                            saver[bucket] = raw_text

                #apply truncation if any, ensure text is sentence-terminated
                if trunc > 0 and text_units > trunc:
                    text = text[:trunc]
                if text[-1] not in PUNCT:
                    text.append(PUNCT[0])

                sentences = []          # list of document sentences
                sentence = []           # list of sentence words
                breaks = []             # list of sentence lengths
                word_count = 0          # nbr words in document   

                #split text into sentences
                for t in text:
                    sentence.append(t)
                    if t in PUNCT:
                        sentence_len = len(sentence)
                        if sentence_len > 1:
                            word_count += sentence_len
                            sentences.append(sentence)
                            breaks.append(word_count)

                            if word_count > max_document_words:
                                max_document_words = word_count
                            if sentence_len > max_sentence_words:
                                max_sentence_words = sentence_len
                                if debug:
                                    print("")
                                    print(f"*** longest sentence encountered thus far - {sentence_len} tokens ***")
                                    print("")
                                    print(sentence)
                                    print("")
                                    #print(raw_text)
                                    #print("")
                        sentence = []

                #add split sentences to reviews
                if len(sentences) > 0:
                    reviews.append(sentences)
                    if len(sentences) > max_document_sentences:
                        max_document_sentences = len(sentences)

                    #add label and sentence boundaries 
                    labels.append(dic['stars'])
                    sentence_breaks.append(breaks)
                    nbr_retained += 1

        print('\nsaved %i reviews' % len(reviews))

        if print_samples:
            print(" ")
            print("A sampling of reviews based on rating and length....")
            print_sample_text(text_capture)

        years = sorted(year_data, reverse=True)
        for year in years:
            print(f"{year} - records: {year_data[year]}")

        #generate Word2Vec embeddings, use all processed raw text to train word2vec
        print("generating word2vec embeddings")
        self.all_sentences = [sentence for document in reviews for sentence in document]
        self.model = Word2Vec(self.all_sentences, min_count=5, size=embedding_size, workers=4, iter=5)
        self.model.init_sims(replace=True)      # gensim 3.x API: gensim 4 renames size->vector_size, iter->epochs and deprecates init_sims

        #save all word embeddings to matrix
        print("saving word vectors to matrix")
        self.vocab = np.zeros((len(self.model.wv.vocab)+1,embedding_size))
        word2id = {}

        #first row of embedding matrix isn't used so that 0 can be masked
        for key, val in self.model.wv.vocab.items():
            idx = val.__dict__['index'] + 1
            self.vocab[idx, :] = self.model[key]
            word2id[key] = idx

        #normalize embeddings
        self.vocab -= self.vocab.mean()
        self.vocab /= (self.vocab.std()*2.5)
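        # dividing by 2.5 standard deviations leaves roughly 99% of the
        # (approximately normal) embedding components inside [-1, 1]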

        #reset first row to 0
        self.vocab[0,:] = np.zeros((embedding_size))

        #add additional word embedding for unknown words
        self.vocab = np.concatenate((self.vocab, np.random.rand(1,embedding_size)))

        #index for unknown words
        unk = len(self.vocab)-1

        #capture word, sentence and rating (stars) distributions
        word_cap = 50
        word_size = 100
        word_hist = np.zeros(word_cap, dtype='int32')

        sent_cap = 40
        sent_size = 5
        sent_hist = np.zeros(sent_cap, dtype='int32')

        l_hist = np.zeros(RNG_STARS, dtype='int32')

        #convert words to word indices
        print("converting words to indices")
        self.data = []

        for idx, document in enumerate(reviews):
            dic = {}
            dic['text'] = document

            len_sentences = len(document)
            sent_ndx = int(len_sentences / sent_size)
            if sent_ndx >= sent_cap:
                sent_ndx = sent_cap - 1
            sent_hist[sent_ndx] += 1

            # Up to this point the logic mimics that found in feature_extraction_yelp.py.
            # indicies is a list of lists; each token of the inner lists is an English word.
            # Now we reach into the logic of tf_cnn.py to identify additional preprocessing
            # that can be done out-of-line, i.e. here.
            #
            # tf_cnn.py flattens each document from a list of sentences into a single
            # list of words, i.e. run-on sentences.
            #
            # It then takes an in-storage document array like the one built here (as tfrecords)
            # and applies LabelEncoder and LabelBinarizer to convert the floating point 'stars'
            # labels, ranging from 1.0 to 5.0, to zero-based integers 0 through 4 and then to
            # one-hot encodings (as LabelBinarizer.transform does). We use these ranges to
            # generate the labels directly, one record at a time.
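            #
            # For illustration (assuming MIN_STARS == 1 and RNG_STARS == 5, per the
            # Yelp range): a 4.0-star review maps to index 3 and to the one-hot
            # vector [0, 0, 0, 1, 0] -- exactly what LabelBinarizer would emit
            # after fitting on the labels 1.0 through 5.0.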

            indicies = []
            for sentence in document:
                len_sentence = len(sentence)
                word_ndx = int(len_sentence / word_size)
                if word_ndx >= word_cap:
                    word_ndx = word_cap - 1
                word_hist[word_ndx] += 1

                for word in sentence:
                    if word in word2id:
                        token = word2id[word]
                    else:
                        token = unk
                    indicies.append(token)

            # add digitized words and sentence boundaries
            dic['idx'] = indicies
            dic['breaks'] = sentence_breaks[idx]

            # convert label of stated range to a one-hot array
            dic['label'] = labels[idx]                              # the old label, just for reference
            int_lbl = int(labels[idx])
            ndx_lbl = int_lbl - MIN_STARS
            if ndx_lbl < 0 or ndx_lbl >= RNG_STARS:
                continue
            l_hist[ndx_lbl] += 1

            one_hot_stars = np.zeros(RNG_STARS, dtype=int)          # np.int is deprecated; plain int works
            one_hot_stars[ndx_lbl] = 1                              # ndx_lbl already holds stars - MIN_STARS
            dic['label'] = one_hot_stars                            # the new label, overwriting the above
            self.data.append(dic)

        # display distributions
        print(" ")
        print(f"Number of discarded French reviews: {french_count}")
        print(f"Number of discarded German reviews: {german_count}")

        print(" ")
        print("Review length distribution - #sentences")
        for i in range(sent_cap):
            tag = i * sent_size
            nbr_hits = sent_hist[i]
            if nbr_hits > 0:
                print("%4d - %4d: %7d" % (tag, tag + sent_size - 1, sent_hist[i]))

        print(" ")
        print("Review length distribution #words")
        for i in range(word_cap):
            tag = i * word_size
            nbr_hits = word_hist[i]
            if nbr_hits > 0:
                print("%4d - %4d: %7d" % (tag, tag + word_size - 1, word_hist[i]))

        print(" ")
        print("Rating summary (# stars)")
        distribution_list = []
        for i in range(RNG_STARS):
            print("%d - %7d" % ((i + MIN_STARS), l_hist[i]))
            distribution_list.append(l_hist[i])

        # capture tfrecord metadata
        # max_review_words is the number of words contained in the longest review.
        # classes is the number of stars assigned by the reviewer - currently one 
        # through five for a total of five classifications.

        # generate tfrecords 
        random.shuffle(self.data)
        self.iterable_data = Supervised_data(self.data)
        train_count, test_count = self.convert_to_tfr(self.iterable_data, "HCAN.tfrecords", outpfx=outpfx)

        metadata = dict(
            max_review_words=max_document_words,
            max_review_sentences=max_document_sentences,
            max_sentence_words=max_sentence_words,
            classes=RNG_STARS,
            balanced=balance,
            notes=metadata_tag,
            distribution=[int(n) for n in distribution_list],       # cast numpy ints to plain ints so json.dump succeeds
            train_count=train_count,
            test_count=test_count
        )
        print(f"Review maximum sentences: {max_document_sentences} words: {max_document_words}")
        print(f"There are {RNG_STARS} classifications")
        with open(outpfx + 'HCAN-metadata.json', 'w') as f:
            json.dump(metadata, f)
Example #54
0
import ast
import socket
import ssl
import SocketServer
from ConfigParser import ConfigParser
from SimpleXMLRPCServer import (SimpleXMLRPCServer, SimpleXMLRPCRequestHandler,
                                SimpleXMLRPCDispatcher)

config = ConfigParser()
config.read('server.conf')

# Configure below
LISTEN_HOST = config.get('SERVER', 'host')
LISTEN_PORT = int(config.get('SERVER', 'port'))

KEYFILE  = config.get('SERVER', 'keyfile')
CERTFILE = config.get('SERVER', 'certfile')

#Easiest way to create the key file pair was to use OpenSSL -- http://openssl.org/ Windows binaries are available
#You can create a self-signed certificate easily "openssl req -new -x509 -days 365 -nodes -out cert.pem -keyout privatekey.pem"
#for more information --  http://docs.python.org/library/ssl.html#ssl-certificates

userPassDict = ast.literal_eval(config.get('AUTH', 'users'))
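
# Illustrative server.conf matching the reads above (placeholder values, not
# taken from the source):
#
#   [SERVER]
#   host = 0.0.0.0
#   port = 8443
#   keyfile = privatekey.pem
#   certfile = cert.pem
#
#   [AUTH]
#   users = {'alice': 'secret'}
#
# 'users' must be a Python dict literal, since it is parsed with ast.literal_eval.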
   
class SimpleXMLRPCServerTLS(SimpleXMLRPCServer):
    def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler,
                 logRequests=True, allow_none=False, encoding=None, bind_and_activate=True):
        """Overriding __init__ method of the SimpleXMLRPCServer

        The method is an exact copy, except the TCPServer __init__
        call, which is rewritten using TLS
        """
        self.logRequests = logRequests

        SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding)

        """This is the modified part. Original code was:
Example #55
0
import os
import ast
import datetime
import gensim
import pandas as pd
# docs2vecs() is a helper defined elsewhere in the original notebook

# !unzip mallet-2.0.8.zip

# because we mounted at gdrive/My Drive, need to move mallet-2.0.8 from there to content folder

# os.environ['MALLET_HOME'] = '/content/mallet-2.0.8'
# mallet_path = '/content/mallet-2.0.8/bin/mallet'

#snippet for desktop
# Download File: http://mallet.cs.umass.edu/dist/mallet-2.0.8.zip\
os.environ.update({'MALLET_HOME': r'C:/Users/Shawn/Desktop/mallet-2.0.8'
                   })  #change to location where mallet is
mallet_path = r'C:\\Users\\Shawn\\Desktop\\mallet-2.0.8\\bin\\mallet'  # update this path

df_csl = pd.read_csv(' ')  #include path to cleaned file

NA_lemmatized_list = [ast.literal_eval(i) for i in df_csl['Lemmatized']]

dict_NA_lemmatized = gensim.corpora.Dictionary(
    NA_lemmatized_list)  #generating dictionary
vecs_NA_lemmatized = docs2vecs(NA_lemmatized_list,
                               dict_NA_lemmatized)  #generating vectors

#for timing duration of model, can remove if unnecessary
start = datetime.datetime.now()
print(start)

ldamallet_50 = gensim.models.wrappers.LdaMallet(mallet_path,
                                                corpus=vecs_NA_lemmatized,
                                                num_topics=50,
                                                id2word=dict_NA_lemmatized)
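
# close out the timing block opened above (not in the original fragment)
end = datetime.datetime.now()
print('model build time:', end - start)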
Example #56
0
                    addNode(u[0], spd[u[0]] + d)
    return 'done'

#################main#################

import ast
import sys

points = []
struct = []     # edge list filled below; its consumers (Dijstra, Astar) are defined outside this fragment
try:
    graph = open('out.txt', 'r')
    points = [int(sys.argv[i]) for i in range(1, len(sys.argv))]
except (IOError, ValueError):
    # if open() failed, 'graph' was never bound, so there is nothing to close
    exit(1)

e = graph.readline().rstrip("\n")
while e:
    struct.append(ast.literal_eval(e))
    e = graph.readline().rstrip("\n")
graph.close()

dijkstra = Dijstra(points[0], points[1])

if dijkstra == 'done':
    print('not connected')
    exit(1)

astar = Astar(points[0], points[1])

print('\nDijkstra results: ')
print('distance to node %s is: %s' % (points[1], dijkstra[0]))
print('total visits: ' , dijkstra[2])
print('path: ', dijkstra[1])
Example #57
0
import matplotlib
matplotlib.rcParams['text.usetex'] = True
matplotlib.rcParams['text.latex.unicode'] = True    # this rcParam only exists on matplotlib < 3.0
import ast
import sys
from pprint import pprint
import numpy as np
from statistics import mean, median, pstdev, pvariance

# CONFIG
thres_annot = 50000

#with open("make_stats_ttl_biflow_port23_2323_windows.output", "r") as f:
with open("make_stats_ttl_biflow.output", "r") as f:
    temp = f.read()

baseArray = ast.literal_eval(temp)
baseArray23 = baseArray[0]
baseArray2323 = baseArray[1]
baseArraySession = baseArray[2]

dico_delta_ttl_23_6h = baseArray23[0]
dico_delta_ttl_23_24h = baseArray23[1]
dico_delta_ttl_23_10s = baseArray23[2]
tot_num_23 = baseArray23[3]

dico_delta_ttl_2323_6h = baseArray2323[0]
dico_delta_ttl_2323_24h = baseArray2323[1]
dico_delta_ttl_2323_10s = baseArray2323[2]
tot_num_2323 = baseArray2323[3]

dico_delta_ttl_session_6h = baseArraySession[0]
Example #58
0
import csv
import json
import ast

inflation_csv = open("CPI.csv", 'r')
inflation_json = open("CPI_data.json", 'w')

reader = csv.DictReader(inflation_csv)
jsonList = []

for row in reader:
    jsonList.append(ast.literal_eval(json.dumps(row)))

inflation_json.write(str(jsonList))
inflation_json.close()
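
# note: str(jsonList) writes a Python literal (single quotes), not strict JSON;
# the json.dumps/literal_eval round trip above only served to turn each
# OrderedDict row into a plain dict. Read the file back with ast.literal_eval,
# as the commented snippet below sketches.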

# inflation_json = list(open("inflation_data.json", 'r'))
#
# x = (ast.literal_eval(inflation_json[0]))
#
# y = json.dumps(x)
# print(type(y))

#print(x[5].get("DATE"))

# data = json.loads(x)
# print(data)

# print('[', end='')
# for obj in jsonList:
# 	if not (obj == jsonList[-1]):
Example #59
0
    def intersection(self, arguments: arguments.Arguments):
        from ast import literal_eval
        list_one = set(literal_eval(arguments.first_param))
        list_two = set(literal_eval(arguments.second_param))
        return list_one.intersection(list_two)
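    # illustrative call (parameter values assumed, not from the source): with
    # arguments.first_param == "[1, 2, 3]" and arguments.second_param == "[2, 3, 4]"
    # the method returns {2, 3}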
Example #60
0
import discord
from discord.ext import commands
import ast
import random
import redditVideoConverter
import asyncio
import re
from datetime import datetime, date
import time
from pytz import utc
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
import sqlite3

with open('discordBot.secret', 'r') as secretFile:
    secrets = ast.literal_eval(secretFile.read())
    botAuthToken = secrets['botAuthToken']
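    # discordBot.secret is assumed to hold a Python dict literal such as
    # {'botAuthToken': '<token>'}, which is why ast.literal_eval can parse it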

scheduler = AsyncIOScheduler(
    jobstores={'default': SQLAlchemyJobStore(url='sqlite:///jobs.sqlite')})
scheduler.start()


async def sendMessage(guildId, channelId, message):
    guild = bot.get_guild(guildId)
    channel = guild.get_channel(channelId)
    await channel.send(message)


bot = commands.Bot(command_prefix='$')