Example #1
File: database.py Project: 11craft/blib
 def __init__(self, billings_path):
     db_path = os.path.abspath(os.path.join(billings_path, "Database", "billings.bid"))
     uri = "sqlite:///" + db_path
     SqlSoup.__init__(self, uri)
     self.setup_constants()
     self.setup_relations()
     self.setup_column_introspection()
Example #2
def raw_sql(sql):
    engine = create_engine(
        "mysql://%s:%s@%s/%s"
        % (CLOUD_MONITOR_MYSQL_USERNAME, CLOUD_MONITOR_MYSQL_PASSWORD, CLOUD_MONITOR_MYSQL_HOST, CLOUD_MONITOR_MYSQL_DB)
    )
    db = SqlSoup(engine)
    return db.execute(sql).fetchall()
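For orientation, SqlSoup accepts either an Engine object (as above) or a plain database URL, and exposes both raw SQL and auto-mapped table attributes. A minimal sketch, assuming a SQLite file with a `users` table (the file name and table are illustrative, not taken from the examples):

from sqlalchemy.ext.sqlsoup import SqlSoup

# Build the soup straight from a URL; an existing Engine works just as well.
db = SqlSoup("sqlite:///example.db")

# Raw SQL, as in raw_sql() above.
rows = db.execute("SELECT * FROM users").fetchall()

# Or via the reflected table attribute.
rows = db.users.all()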
Example #3
def getSupression():
    global db_url
    global supressions
    global ports
    global ip_ports
#This is to get all guarantees from the sql in the dwarf-server
    options = {"sql_connection": db_url}
    db = SqlSoup(options["sql_connection"])
    sup_info = db.supression.all()
    c_time = int(time.time())
    db.commit()
    for sup in sup_info:
        src_ip = sup.src_ip
        port_name = ip_ports[src_ip]["port_name"]
        print "getting db"
        print port_name
        for pid in ports:
            if port_name == ports[pid].port_name:
                print "this supression is mine"
                supress = sup.supression
                o_time = sup.time
                if port_name in supressions:
                    if c_time < (o_time + 10):
                        print "wow, a new one!" 
                        supressions[port_name]=supress
                    else:
                        del supressions[port_name]
Example #4
File: views.py Project: blorticus/mypam
def rest_service( request, service, version, resource ):
    db = SqlSoup('postgres://%s/%s' % ('localhost', 'mypam'))
    method = request.method
    rp = db.execute("""SELECT rest.datasource.url, rest.resource.query, standard.array_accum( rest.parameter.name ) FROM rest.datasource 
        JOIN rest.service ON rest.service.datasource_id=rest.datasource.id
        JOIN rest.version ON rest.version.service_id=rest.service.id
        JOIN rest.resource ON rest.resource.version_id=rest.version.id
        LEFT JOIN rest.parameter ON rest.parameter.resource_id=rest.resource.id
        WHERE rest.service.name=:service AND rest.version.version=:version AND rest.resource.name=:resource AND rest.resource.method=:method
        GROUP BY rest.datasource.url, rest.resource.query""", 
        params={ 'service' : service, 'version' : version, 'resource' : resource, 'method' : method })
    row = rp.first()
    if row != None:
        ( datasource_url, query, parameters ) = row
        service_db = SqlSoup( datasource_url )
        query_params = {}
        for parameter in parameters:
            if parameter != None:
                query_params[ parameter ] = request.REQUEST.get( parameter )
        try:
            query_rp = db.execute( query, params=query_params )
            query_row = query_rp.first()
        except Exception as exc:
            return HttpResponseBadRequest( str( exc ), content_type="text/plain" )
        if query_row != None:
            print query_row
            return HttpResponse( query_row[0], content_type="text/plain")
    return HttpResponseNotFound("service=[%s] version=[%s] resource=[%s] not found" % (service, version, resource), 
        content_type="text/plain")
Example #5
def scrape_vimeo(query):
    # Extract a UTC datetime object
    utc_datetime = datetime.datetime.utcnow()
    # Connect to the DB; if we can't, this will fail anyway.
    db = SqlSoup(dbconfig.dburl)

    page = urllib2.urlopen(query).read()
    soup = BeautifulSoup(page)
    count_regex = re.compile('\+ Browse all ([0-9.]+)K videos')

    for license_code in license_codes:
        license = 'http://creativecommons.org/licenses/%s/3.0/' % license_code
        count_href = '/creativecommons/' + license_code
        count_text = soup.find('a', 'more', href=count_href).string
        count = count_regex.match(count_text).group(1)
        if count:
            # Vimeo notates number of videos as [N]K, where N may be a floating
            # point number, but the true number will never be floating, so just
            # do the multiplication, then convert it to an integer.
            real_count = int(float(count) * 1000)
            # Build a db row
            row = {}
            row['utc_time_stamp'] = utc_datetime
            row['site'] = 'http://vimeo.com/'
            row['license_uri'] = license
            row['count'] = real_count
            # Insert count into site_specific table
            db.site_specific.insert(**row)
            db.flush()
        else:
            raise Exception, "No count found at Vimeo for license: " + license
Example #6
    def start(self):
        super(MySQLBind9Backend, self).start()

        if cfg.CONF[self.name].write_database:
            self._engine = get_engine(self.name)
            self._db = SqlSoup(self._engine)

        self._sync_domains()
Example #7
def plominoSqlSync(session, plominoDocument, **table_infos):
    """
    table_infos = dict(schema='<schema_table>')
    """

    if isinstance(session, basestring):
        session = named_scoped_session(session)

    engine = session.get_bind()

    db = SqlSoup(engine, session=session)

    table_name = plominoDocument.Form
    main_table = db.entity(table_name, **table_infos)

    values = dict()
    plominoItems = plominoDocument.getItems()
    for column in main_table.c:
        if column.key == u"id":
            continue
        if column.key in plominoItems and (plominoDocument.getItem(column.key, None) != None):
            values[column.key] = plominoDocument.getItem(column.key)

    plominoDatabase = plominoDocument.getParentDatabase()
    values[u"modified_by"] = plominoDatabase.getCurrentUser().id
    values[u"last_update"] = plominoDocument.plomino_modification_time.asdatetime()

    if plominoDocument.isNewDocument():
        values[u"plominoId"] = plominoDocument.id
        main_table.insert(**values)
    else:
        if not main_table.filter(main_table.plominoId == plominoDocument.id).update(values):
            values[u"plominoId"] = plominoDocument.id
            main_table.insert(**values)

    plominoForm = plominoDatabase.getForm(plominoDocument.Form)
    plominoFields = plominoForm.getFormFields(includesubforms=True, applyhidewhen=True)

    for field in plominoFields:
        if field.getFieldType() in ("DATAGRID",) and (plominoDocument.getItem(field.id) != None):
            gridItem = plominoDocument.getItem(field.id)
            grid_table_name = field.id  # or field.getSettings(key='associated_form')
            try:
                grid_table = db.entity(grid_table_name, **table_infos)
            except NoSuchTableError, err:
                pass
            else:
                grid_table.filter(grid_table.parentId == plominoDocument.id).delete()

                vals = dict()
                for record in gridItem:
                    field_mapping = field.getSettings(key="field_mapping").split(",")
                    for idx, key in enumerate(field_mapping):
                        if record[idx] != None:
                            vals[key] = record[idx]
                    vals[u"parentId"] = plominoDocument.id
                    grid_table.insert(**vals)
Example #8
def migrateVersion3():
    log.info('Upgrading DB to version 3.')

    # for some normal executions
    db = SqlSoup(engine)

    try:
        db.execute('ALTER TABLE Movie ADD dateChanged TIMESTAMP')
        log.info('Added dateChanged to Movie table')
    except OperationalError:
        log.debug('Column dateChanged already added')

    Session.add(DbVersion(3))
    Session.flush()
Example #9
 def testconnection(self, param):
     '''
     This function lets you test the SQL connection before it is saved.

     :param param: A dictionary with all parameters necessary to test the connection.
     :type param: dict
     
     :return: Tuple of success and a description
     :rtype: (bool, string)  
     
     Parameters are: Server, Driver, Database, User, Password, Port, Limit, Table, Map
                     Where, Encoding, conParams
         
     '''
     
     num=-1
     desc=None
     
     port = ""
     password = ""
     conParams = ""
     if param.get("Port"):
         port = ":%s" % param.get("Port")
     if param.get("Password"):
         password = "******" % param.get("Password")
     if param.get("conParams"):
         conParams = "?%s" % param.get("conParams")
     connect_string = "%s://%s%s%s%s%s/%s%s" % (param.get("Driver"),
                                                 param.get("User"),
                                                 password,
                                                 "@" if (param.get("User") or password) else "",
                                                 param.get("Server"),
                                                 port,
                                                 param.get("Database"),
                                                 conParams)
     log.info("using the connect string %s" % connect_string)
     engine = create_engine(connect_string)
     # create a configured "Session" class
     session = sessionmaker(bind=engine)()
     db = SqlSoup(engine)
     TABLE = db.entity(param.get("Table"))
         
     try:    
         result = session.query(TABLE).count()
         num = result
         desc = "Found %i users." % num
     except Exception as exx:
         desc = "failed to retrieve users: %s" % exx
         
     return (num, desc)
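A hypothetical call to testconnection(), using only the parameter keys that the docstring above lists; the concrete values and the `resolver` instance are placeholders, not taken from the example:

# All values below are illustrative placeholders.
param = {
    "Driver": "mysql",
    "Server": "localhost",
    "Port": "3306",
    "Database": "userdb",
    "User": "resolver",
    "Password": "secret",
    "Table": "users",
    "conParams": "charset=utf8",
}
num_users, description = resolver.testconnection(param)  # resolver: instance of the resolver class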
Example #10
def migrateVersion3():
    log.info('Upgrading DB to version 3.')

    # for some normal executions
    db = SqlSoup(engine)

    try:
        db.execute('ALTER TABLE Movie ADD dateChanged TIMESTAMP')
        log.info('Added dateChanged to Movie table')
    except OperationalError:
        log.debug('Column dateChanged already added')

    Session.add(DbVersion(3))
    Session.flush()
Example #11
def main():
    db = SqlSoup(dbconfig.dburl)
    for filename in sorted(glob.glob(FLICKR_DATA_BASE_PATH + '/*.csv')):
        if 'cumulative' in filename:
            continue
        utc_date_time = filename2utc_datetime(filename)
        csv_fd = csv.reader(open(filename))
        print 'Importing', filename,
        for row in csv_fd:
            importable = csv_row2dict(row, utc_date_time)
            db.site_specific.insert(**importable)
            db.flush()
        # since that worked, rename the filename
        os.rename(filename, filename + '.imported')
        print '...done.'
Example #12
File: SincroDB.py Project: fmarella/pg3
    def connectDbRemote(self):
        """ effettua la connessione al DB remoto """
        SessionRemote = None
        self.metaRemote = None
#        self.sessionRemote.close_all()
#        self.engine.dispose() # added this
#        del self.sessionRemote
#        del self.engine
        mainschema_remoto = Environment.conf.SincroDB.mainschema_remoto
        user_remoto = Environment.conf.SincroDB.user_remoto
        password_remoto = Environment.conf.SincroDB.password_remoto
        host_remoto = Environment.conf.SincroDB.host_remoto
        port_remoto = Environment.conf.SincroDB.port_remoto
        database_remoto = Environment.conf.SincroDB.database_remoto

        engine = create_engine('postgres:'+'//'
                                +user_remoto+':'
                                + password_remoto+ '@'
                                + host_remoto + ':'
                                + port_remoto + '/'
                                + database_remoto,
                                encoding='utf-8',
                                convert_unicode=True )
        tipo_eng = engine.name
        engine.echo = False
        self.metaRemote = MetaData(engine)
        self.pg_db_server_remote = SqlSoup(self.metaRemote)
        self.pg_db_server_remote.schema = Environment.params["schema"]
        self.pg_db_server_main_remote = SqlSoup(self.metaRemote)
        self.pg_db_server_main_remote.schema = mainschema_remoto
        SessionRemote = scoped_session(sessionmaker(bind=engine))
        self.sessionRemote = SessionRemote()
        print ">>>> CONNESSO AL DB REMOTO : %s IP: %s PORTA: %s SCHEMA %s <<<<< " %(database_remoto, host_remoto, port_remoto, Environment.params["schema"])
Example #13
def main():
    cfg.CONF(args=sys.argv, project='quantum')

    # (TODO) gary - swap with common logging
    logging_config.setup_logging(cfg.CONF)

    integ_br = cfg.CONF.OVS.integration_bridge
    root_helper = cfg.CONF.AGENT.root_helper
    options = {"sql_connection": cfg.CONF.DATABASE.sql_connection}
    db = SqlSoup(options["sql_connection"])

    LOG.info(
        _("Connecting to database \"%(database)s\" on %(host)s") % {
            "database": db.engine.url.database,
            "host": db.engine.url.host
        })
    ofp_rest_api_addr = check_ofp_rest_api_addr(db)

    tunnel_ip = _get_tunnel_ip()
    LOG.debug(_('tunnel_ip %s'), tunnel_ip)
    ovsdb_port = cfg.CONF.OVS.ovsdb_port
    LOG.debug(_('ovsdb_port %s'), ovsdb_port)
    ovsdb_ip = _get_ovsdb_ip()
    LOG.debug(_('ovsdb_ip %s'), ovsdb_ip)
    try:
        OVSQuantumOFPRyuAgent(integ_br, ofp_rest_api_addr, tunnel_ip, ovsdb_ip,
                              ovsdb_port, root_helper)
    except httplib.HTTPException, e:
        LOG.error(_("initialization failed: %s"), e)
        sys.exit(1)
Example #14
File: loader.py Project: danciul/openquake
    def _sql_soup_init(self, schema):
        """
            Gets the schema to connect
            to the db, creates a SqlSoup instance, sets the schema

            :param schema: database schema
            :type schema: str
        """
        # be sure that autoflushing/expire_on_commit/autocommit are false
        soup_db = SqlSoup(self.engine,
            session=scoped_session(sessionmaker(autoflush=False,
            expire_on_commit=False, autocommit=False)))
        soup_db.schema = schema
        soup_db.catalog.relate('surface', soup_db.surface)
        soup_db.catalog.relate('magnitude', soup_db.magnitude)
        return soup_db
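The relate() calls above attach relationships to the auto-mapped entities so related rows become reachable as attributes. A minimal sketch of the same pattern, assuming hypothetical `catalog` and `surface` tables where `surface` carries a foreign key to `catalog`:

from sqlalchemy.ext.sqlsoup import SqlSoup

db = SqlSoup("sqlite:///eqcatalog.db")      # hypothetical database file
db.catalog.relate('surface', db.surface)    # wire the relationship onto the mapped table

for event in db.catalog.all():
    # Related surface rows are now reachable as an attribute.
    print(event.surface)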
Example #15
def main():
    usagestr = "%prog [OPTIONS] <config file>"
    parser = OptionParser(usage=usagestr)
    parser.add_option("-v",
                      "--verbose",
                      dest="verbose",
                      action="store_true",
                      default=False,
                      help="turn on verbose logging")

    options, args = parser.parse_args()

    if options.verbose:
        LOG.basicConfig(level=LOG.DEBUG)
    else:
        LOG.basicConfig(level=LOG.WARN)

    if len(args) != 1:
        parser.print_help()
        sys.exit(1)

    config_file = args[0]
    conf = config.parse(config_file)
    integ_br = conf.OVS.integration_bridge
    root_helper = conf.AGENT.root_helper
    options = {"sql_connection": conf.DATABASE.sql_connection}
    db = SqlSoup(options["sql_connection"])

    LOG.info("Connecting to database \"%s\" on %s", db.engine.url.database,
             db.engine.url.host)
    plugin = OVSQuantumOFPRyuAgent(integ_br, db, root_helper)
    plugin.daemon_loop(db)

    sys.exit(0)
Example #16
def connect(url):
    global db
    engine = sa.create_engine(url, pool_recycle=30, echo_pool=True, poolclass=SingletonThreadPool)
    db = SqlSoup(engine)

    global goodNameClause
    goodNameClause = db.machines.is_active == 1
Example #17
def getInactiveMachines(statusdb_url, initial_time, start_time, end_time):
    """Returns a list of slave machines that have been active between
    initial_time and end_time, but haven't been active between start_time and
    end_time"""
    db = SqlSoup(statusdb_url)

    q = sa.select([db.slaves.id, db.slaves.name],
                  sa.and_(
                      sa.not_(
                          sa.exists(
                              sa.select([db.builds.slave_id],
                                        sa.and_(
                                            db.builds.starttime >= start_time,
                                            db.builds.endtime <= end_time,
                                            db.builds.slave_id == db.slaves.id,
                                        )))),
                      sa.exists(
                          sa.select([db.builds.slave_id],
                                    sa.and_(
                                        db.builds.starttime >= initial_time,
                                        db.builds.starttime <= end_time,
                                        db.builds.slave_id == db.slaves.id,
                                    )))))

    return [row['name'] for row in q.execute()]
Example #18
def main(infd, unix_time, dry_run = False):
    # Extract a UTC datetime object
    utc_datetime = datetime.datetime.utcfromtimestamp(unix_time)
    # Connect to the DB; if we can't, this will fail anyway.
    db = SqlSoup(dbconfig.dburl)
    # Scrape the results we just wgetted
    license2count = parse(infd)
    # Prepare any remaining DB columns, and write a CSV summary file
    extra_data = {
        'utc_time_stamp': utc_datetime,
        'site': 'http://www.flickr.com/'}
    importable = []
    csv_values = {}
    license2flickr = dict((v,k) for k,v in flickr2license.items())
    for key in license2count:
        row = {}
        row.update(extra_data)
        row['count'] = license2count[key]
        row['license_uri'] = key
        importable.append(row)
        csv_values[license2flickr[key]] = row['count']
    if dry_run:
        print importable
        print csv_values
    else:
        # Write data to CSV file
        csv = open('./data/%s.csv.imported' % utc_datetime.date(), 'w')
        for license, count in csv_values.items():
            csv.write('%s,%d\n' % (license,count))
        csv.close()
        # Write data to database
        counts = {}
        for row in importable:
            db.site_specific.insert(**row)
            db.flush()
            counts[row['license_uri'].split('/')[4]] = row['count']

        # Sort by license code
        lic_sorted = sorted(counts)
        # Join counts (sorted) with a comma
        cnt_sorted = ','.join(map(str, [counts[key] for key in lic_sorted]))
        # Write csv data to big historical file.  WARNING: this presupposes
        # that license version and jurisdiction never change on Flickr
        hist = open('./data/counts-historical-Flickr.csv', 'a')
        hist.write(str(utc_datetime.date()) + ',Flickr,2.0,Unported,' + cnt_sorted + '\n')
        hist.close()
Example #19
    def start(self):
        super(MySQLBind9Backend, self).start()

        if cfg.CONF[self.name].write_database:
            self._engine = get_engine(self.name)
            self._db = SqlSoup(self._engine)

        self._sync_domains()
Example #20
class GenericFillData(object):
    def __init__(self):
        self.azienda_destinazione = "latelier"
        self.database = "promogest_db"
        self.port = "5432"
        self.user = "******"
        self.password = "******"
        self.host = "localhost"
        self.file_csv = "aliquota_iva.csv"
        self.mainSchema = "promogest2"
        self.connection()

    def connection(self):
        engine = create_engine('postgres:'+'//'+self.user+':'
                            + self.password+ '@'
                            + self.host + ':'
                            + self.port + '/'
                            + self.database,
                            encoding='utf-8',
                            convert_unicode=True )

        engine.echo = True
        meta = MetaData(engine)
        self.pg_db_dest = SqlSoup(meta)
        self.pg_db_dest.schema = self.azienda_destinazione
        self.readFile()
    
    def readFile(self):
        spamReader = csv.reader(open(self.file_csv), delimiter=';', quotechar='"')
        self.fillDataContact(spamReader)
    
    def fillDataContact(self,spamReader):
        for row in spamReader:
            _art = self.pg_db_dest.aliquota_iva()
            _art.id=row[0]
            _art.denominazione_breve=row[1]
            _art.denominazione=row[2]
            _art.percentuale = row[3]
            _art.percentuale_detrazione = row[4]
            _art.descrizione_detrazione = row[5]
            _art.id_tipo = row[6]

            sqlalchemy.ext.sqlsoup.Session.add(_art)
            sqlalchemy.ext.sqlsoup.Session.commit()

        self.pg_db_dest.flush()
Example #21
def main(license,query):
    # Extract a UTC datetime object
    utc_datetime = datetime.datetime.utcnow()
    # Connect to the DB; if we can't, this will fail anyway.
    db = SqlSoup(dbconfig.dburl)
    # Scrape for CC-BY and all count.
    count = scrape_youtube(query)

    # Build a db row
    row = {}
    row['utc_time_stamp'] = utc_datetime
    row['site'] = 'http://www.youtube.com/'
    row['license_uri'] = license
    row['count'] = count

    # Insert count into site_specific table
    db.site_specific.insert(**row)
    db.flush()
Example #22
    def _sql_soup_init(self, schema):
        """
            Gets the schema to connect
            to the db, creates a SqlSoup instance, sets the schema

            :param schema: database schema
            :type schema: str
        """
        # be sure that autoflushing/expire_on_commit/autocommit are false
        soup_db = SqlSoup(self.engine,
                          session=scoped_session(
                              sessionmaker(autoflush=False,
                                           expire_on_commit=False,
                                           autocommit=False)))
        soup_db.schema = schema
        soup_db.catalog.relate('surface', soup_db.surface)
        soup_db.catalog.relate('magnitude', soup_db.magnitude)
        return soup_db
Example #23
def main(query):
    # Extract a UTC datetime object
    utc_datetime = datetime.datetime.utcnow()
    # Connect to the DB; if we can't, this will fail anyway.
    db = SqlSoup(dbconfig.dburl)
    # Scrape for PDM count
    count = scrape_europeana_pdm(query)

    # Build a db row
    row = {}
    row['utc_time_stamp'] = utc_datetime
    row['site'] = 'http://europeana.eu/'
    row['license_uri'] = 'http://creativecommons.org/publicdomain/mark/1.0/'
    row['count'] = count

    # Insert count into site_specific table
    db.site_specific.insert(**row)
    db.flush()
Example #24
    def loadConfig(self, config, conf):
        '''
        Load the config from conf.
        
        :param config: The configuration from the Config Table
        :type config: dict
        :param conf: the instance of the configuration
        :type conf: string 
        
        The information which config entries we need to load is taken from
            manage.js: function save_sql_config
                    
        '''
        self.resolverId = conf
        self.server = self.getConfigEntry(config, 'privacyidea.sqlresolver.Server', conf)
        self.driver = self.getConfigEntry(config, 'privacyidea.sqlresolver.Driver', conf)
        self.database = self.getConfigEntry(config, 'privacyidea.sqlresolver.Database', conf)
        self.port = self.getConfigEntry(config, 'privacyidea.sqlresolver.Port', conf, required=False)
        self.limit = self.getConfigEntry(config, 'privacyidea.sqlresolver.Limit', conf, required=False, default=100)
        self.user = self.getConfigEntry(config, 'privacyidea.sqlresolver.User', conf)
        self.password = self.getConfigEntry(config, 'privacyidea.sqlresolver.Password', conf, required=False)
        self.table = self.getConfigEntry(config, 'privacyidea.sqlresolver.Table', conf)
        usermap = self.getConfigEntry(config, 'privacyidea.sqlresolver.Map', conf)
        self.map = yaml.load(usermap)
        self.reverse_map = dict([[v,k] for k,v in self.map.items()])
        self.where = self.getConfigEntry(config, 'privacyidea.sqlresolver.Where', conf, required=False)
        self.encoding = self.getConfigEntry(config, 'privacyidea.sqlresolver.Encoding', conf, required=False, default="latin1")
        self.conParams = self.getConfigEntry(config, 'privacyidea.sqlresolver.conParams', conf, required=False)      
        
        
        # create the connect string like
        # driver://user:passwd@server/database?conParams
        port = ""
        password = ""
        if self.port:
            port = ":%s" % self.port
        if self.password:
            password = "******" % self.password
        self.connect_string = "%s://%s%s@%s%s/%s?%s" % (self.driver,
                                                        self.user,
                                                        password,
                                                        self.server,
                                                        port,
                                                        self.database,
                                                        self.conParams)
        log.info("using the connect string %s" % self.connect_string)
        self.engine = create_engine(self.connect_string, encoding=str(self.encoding))
        # create a configured "Session" class
        Session = sessionmaker(bind=self.engine)

        # create a Session
        self.session = Session()
        self.db = SqlSoup(self.engine)
        self.TABLE = self.db.entity(self.table)
        
        return self
Example #25
 def __init__(self, _db_url=DB_URL):
     self.db_url = _db_url
     # main solexa database
     self.solexa = SqlSoup(_db_url)
     (self.db_host, self.solexa_db_name) = re.compile(
         Lims.db_url_pattern).findall(_db_url)[0]
     # other lims databases
     self.lims = SqlSoup(Lims.db_url_string % {
         'host': self.db_host,
         'db_name': Lims.lims_db_name
     })
     self.request = SqlSoup(Lims.db_url_string % {
         'host': self.db_host,
         'db_name': Lims.request_db_name
     })
     self.general = SqlSoup(Lims.db_url_string % {
         'host': self.db_host,
         'db_name': Lims.general_db_name
     })
Example #26
def suggestFromTable(sessionname, name, columnname, others=[], schema="public", tip="", **filters):
    """
    useful for implementing simple auto-suggest services backed by a table.
    sessionname: name of the session
    name: name of the table
    columnname: name of the column to draw values from
    others: other columns whose values are of interest. Use '' for all of them.
    schema: name of the schema the table belongs to
    tip: the typed "suggestion" prefix to complete
    filters: additional filters of the form <key>=<value>
    """

    session = get_session(sessionname)
    engine = session.get_bind()
    db = SqlSoup(engine)
    table = db.entity(name, schema=schema)

    if isinstance(others, (list, tuple)):
        # you can submit a list of column values to return, at least an empty list
        query = session.query(*[table.c[col] for col in [columnname] + list(others)])
    elif others == "filters_only":
        # easter egg: useful?
        query = session.query(*[table.c[col] for col in [columnname] + filters.keys()])
    else:
        # otherwise all columns will be returned
        query = table

    column = table.c[columnname]
    tip = tip.rstrip()

    # ilike '%(tip)s%%'
    where = or_(
        column.startswith(tip),
        column.startswith(tip.capitalize()),
        column.startswith(tip.title()),
        column.startswith(tip.lower()),
        column.startswith(tip.upper()),
    )
    # other simple filters
    where = and_(where, *[(table.c[k] == v) for k, v in filters.items() if k in table.c.keys()])

    return [row.__dict__ for row in query.filter(where).all()]
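A hypothetical call to suggestFromTable(); the session name, table, columns and extra filter below are illustrative only:

rows = suggestFromTable(
    "mysession",            # name of a session registered elsewhere
    "comuni",               # table to suggest from
    "denominazione",        # column whose values are suggested
    others=["cap"],         # extra columns to return with each row
    tip="rom",              # prefix typed by the user
    provincia="RM",         # extra equality filter on a column
)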
Example #27
File: api_db.py Project: zoomis/ryu
    def __init__(self):
        # Create any tables that don't already exist
        self.createTables()

        self.db = SqlSoup(FLAGS.api_db_url)
        self.db_nets = self.db.networks
        self.db_ports = self.db.ports
        self.db_macs = self.db.macs
        self.db_bonds = self.db.bonds
        self.db_flowspace = self.db.flowspace
        self.db_net2slice = self.db.delegated_nets
Example #28
def migrateVersion4():
    log.info('Upgrading DB to version 4.')

    # for some normal executions
    db = SqlSoup(engine)

    try:
        db.execute('ALTER TABLE MovieETA ADD lastCheck INTEGER')
        log.info('Added lastCheck to MovieETA table')
    except OperationalError:
        log.debug('Column lastCheck already added.')

    try:
        db.execute('ALTER TABLE MovieQueue ADD lastCheck INTEGER')
        log.info('Added lastCheck to MovieQueue table')
    except OperationalError:
        log.debug('Column lastCheck already added.')

    Session.add(DbVersion(4))
    Session.flush()
Example #29
    def connection(self):
        engine = create_engine('postgres:'+'//'+USER+':'
                            + PASSWORD+ '@'
                            + HOST + ':'
                            + PORT + '/'
                            + DATABASE,
                            encoding='utf-8',
                            convert_unicode=True )

        engine.echo = True
        meta = MetaData(engine)
        self.pg_db_dest = SqlSoup(meta)
        self.pg_db_dest.schema = AZIENDA_DESTINAZIONE
        self.readFile()
Example #30
    def connection(self):
        engine = create_engine('postgres:'+'//'+self.user+':'
                            + self.password+ '@'
                            + self.host + ':'
                            + self.port + '/'
                            + self.database,
                            encoding='utf-8',
                            convert_unicode=True )

        engine.echo = True
        meta = MetaData(engine)
        self.pg_db_dest = SqlSoup(meta)
        self.pg_db_dest.schema = self.azienda_destinazione
        self.readFile()
Example #31
class GenericFillData(object):
    def __init__(self):
        """ QUESTO script permette di importare la tabella inventario così come
            esportata dal modulo stesso. Il funzionamento è semplice:
            si lancia lo script con "python nomescript" avendo cura che
            il file csv si trovi nella stessa cartella dello script stesso

            schema csv :
               riga = ('Codice, Descrizione, Quantita\', Valore unitario, U.M., ' +
                    'Codice a barre, Famiglia, Categoria,Anno ,idMagazzino , idArticolo ,data_aggiornamento\n')
            schema tabella:
            anno ,id_magazzino , id_articolo , quantita , valore_unitario,data_aggiornamento
        """
        self.connection()

    def connection(self):
        engine = create_engine('postgres:'+'//'+USER+':'
                            + PASSWORD+ '@'
                            + HOST + ':'
                            + PORT + '/'
                            + DATABASE,
                            encoding='utf-8',
                            convert_unicode=True )

        engine.echo = True
        meta = MetaData(engine)
        self.pg_db_dest = SqlSoup(meta)
        self.pg_db_dest.schema = AZIENDA_DESTINAZIONE
        self.readFile()

    def readFile(self):
        spamReader = csv.reader(open(FILE_CSV), delimiter=DELIMITER, quotechar=QUOTECHAR)
        self.fillDataContact(spamReader)

    def fillDataContact(self,spamReader):
        for row in spamReader:
            quantita = float(row[2].strip().replace(",","."))
            if quantita >0:
                _field = self.pg_db_dest.inventario()
                _field.anno=int(row[8])
                _field.id_magazzino = int(row[9])
                _field.id_articolo = int(row[10])

                _field.quantita = quantita
                _field.valore_unitario = float(row[3].strip().replace(",","."))
                _field.data_aggiornamento = row[11] or None
                sqlalchemy.ext.sqlsoup.Session.add(_field)
                sqlalchemy.ext.sqlsoup.Session.commit()
Example #32
def main():
    global agent_port
    global db_url
    PreConfig()
    #read db for ip and supression
    options = {"sql_connection": db_url}
    db = SqlSoup(options["sql_connection"])
    ips = db.ip_port.all()
    supressions = db.supression.all()
    db.commit()
    sup = {}
#{src_ip:{"supress":sup, "timeleft":time}
    for supress in supressions:
        sup[supress.src_ip] = "20"
#setup sockets
    sock=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind((server_ip, int(server_port)))
    sock.listen(100)
    while True:  
        print time.clock()
        connection,address = sock.accept()  
        try:  
            connection.settimeout(5)  
            buf = connection.recv(1024)  
            flow2 = json.loads(buf)
            #flow2 = {u'src_ip': u'10.10.0.5', u'supression': 0}
            print "Server Received: %s " %(flow2)
            try:
                src_ip = flow2['src_ip']
                supression = flow2['supression'] 
                if src_ip in sup:
                    print "in sup, modify: " + src_ip
                    db.supression.get(src_ip).supression = supression
                    db.supression.get(src_ip).time = int(time.time())
                    db.commit()
                else:
                    print "not in, adding: " + src_ip
                    print supression
                    db.supression.insert(src_ip=src_ip,supression=supression, time=int(time.time()))
                    sup[src_ip]=supression
                    db.commit()
            except Exception:
                
                print "f**k,sql wrong?"
        except socket.timeout:  
            print 'time out'  
        except:
            print 'well, something wrong'
        connection.close()   
Example #33
def main():
    cfg.CONF(args=sys.argv, project='quantum')

    # (TODO) gary - swap with common logging
    logging_config.setup_logging(cfg.CONF)

    integ_br = cfg.CONF.OVS.integration_bridge
    root_helper = cfg.CONF.AGENT.root_helper
    options = {"sql_connection": cfg.CONF.DATABASE.sql_connection}
    db = SqlSoup(options["sql_connection"])

    LOG.info("Connecting to database \"%s\" on %s", db.engine.url.database,
             db.engine.url.host)
    plugin = OVSQuantumOFPRyuAgent(integ_br, db, root_helper)
    plugin.daemon_loop(db)

    sys.exit(0)
Example #34
    def daemon_loop(self):
        old_vlan_bindings = {}
        old_port_bindings = []
        self.db_connected = False

        while True:
            if not self.db_connected:
                time.sleep(self.reconnect_interval)
                db = SqlSoup(self.db_connection_url)
                self.db_connected = True
                LOG.info("Connecting to database \"%s\" on %s" %
                         (db.engine.url.database, db.engine.url.host))
            bindings = self.manage_networks_on_host(db, old_vlan_bindings,
                                                    old_port_bindings)
            old_vlan_bindings = bindings[VLAN_BINDINGS]
            old_port_bindings = bindings[PORT_BINDINGS]
            time.sleep(self.polling_interval)
Example #35
 def connection(self):
     if self.tipo == "postgresql":
         engine = create_engine('postgres:'+'//'+self.user+':'
                             + self.password+ '@'
                             + self.host + ':'
                             + self.port + '/'
                             + self.db_name,
                             encoding='utf-8',
                             convert_unicode=True )
     else:
         self.azienda = None
         self.mainSchema = None
         engine =create_engine("sqlite:///"+self.db_path+"db",listeners=[SetTextFactory()],proxy=MyProxy())
     engine.echo = False
     meta = MetaData(engine)
     self.pg_db_dest = SqlSoup(meta)
     self.pg_db_dest.schema = self.azienda
     self.readFile()
Example #36
 def connectDbRemote(self):
     """ effettua la connessione al DB remoto """
     engine = create_engine('postgres:'+'//'
                             +USER_REMOTO+':'
                             + PASSWORD_REMOTO+ '@'
                             + HOST_REMOTO + ':'
                             + PORT_REMOTO + '/'
                             + DATABASE_REMOTO,
                             encoding='utf-8',
                             convert_unicode=True )
     tipo_eng = engine.name
     engine.echo = False
     self.metaRemote = MetaData(engine)
     self.pg_db_server_remote = SqlSoup(self.metaRemote)
     self.pg_db_server_remote.schema = SCHEMA_REMOTO
     self.pg_db_server_main_remote = SqlSoup(self.metaRemote)
     self.pg_db_server_main_remote.schema = MAINSCHEMA_REMOTO
     SessionRemote = scoped_session(sessionmaker(bind=engine))
     self.sessionRemote = SessionRemote()
     print ">>>> CONNESSO AL DB REMOTO : %s IP: %s PORTA: %s SCHEMA %s <<<<< " %(DATABASE_REMOTO, HOST_REMOTO, PORT_REMOTO, SCHEMA_REMOTO)
Example #37
File: SincroDB.py Project: fmarella/pg3
    def connectDbLocale(self):
        """ effettua la connessione al DB locale """
        SessionLocale = None
        self.engineLocale = None
        self.metaLocale = None
#        self.sessionLocale.close_all()
#        self.engineLocale.dispose() # added this
#        del self.sessionLocale
#        del self.engineLocale


        mainschema_locale = Environment.conf.SincroDB.mainschema_locale
        user_locale = Environment.conf.SincroDB.user_locale
        password_locale = Environment.conf.SincroDB.password_locale
        host_locale = Environment.conf.SincroDB.host_locale
        port_locale = Environment.conf.SincroDB.port_locale
        database_locale = Environment.conf.SincroDB.database_locale

        engineLocale = create_engine('postgres:'+'//'
                                        +user_locale+':'
                                        + password_locale+ '@'
                                        + host_locale + ':'
                                        + port_locale + '/'
                                        + database_locale,
                                        encoding='utf-8',
                                        convert_unicode=True )
        tipo_eng = engineLocale.name
        engineLocale.echo = False
        self.metaLocale = MetaData(engineLocale)
        self.pg_db_server_locale = SqlSoup(self.metaLocale)
        self.pg_db_server_locale.schema = Environment.params["schema"]
        self.pg_db_server_main_locale = SqlSoup(self.metaLocale)
        self.pg_db_server_main_locale.schema = mainschema_locale
        SessionLocale = scoped_session(sessionmaker(bind=engineLocale))
        self.engineLocale = engineLocale
        self.sessionLocale = SessionLocale()
        print ">>>> CONNESSO AL DB LOCALE : %s IP: %s PORTA: %s SCHEMA %s <<<<< " %(database_locale, host_locale, port_locale, Environment.params["schema"])
Example #38
def migrateVersion4():
    log.info('Upgrading DB to version 4.')

    # for some normal executions
    db = SqlSoup(engine)

    try:
        db.execute('ALTER TABLE MovieETA ADD lastCheck INTEGER')
        log.info('Added lastCheck to MovieETA table')
    except OperationalError:
        log.debug('Column lastCheck already added.')

    try:
        db.execute('ALTER TABLE MovieQueue ADD lastCheck INTEGER')
        log.info('Added lastCheck to MovieQueue table')
    except OperationalError:
        log.debug('Column lastCheck already added.')

    Session.add(DbVersion(4))
    Session.flush()
Example #39
File: dbclient.py Project: lbolla/junk
def main():
    db_name = sys.argv[1]
    db = SqlSoup(db_name)
    while True:
        try:
            cmd = raw_input(prompt)
            if not cmd:
                continue
            if cmd.lower() in ("help", "?"):
                show_help()
                continue
            sql = get_sql(cmd, db.engine.name) or cmd
            res = db.execute(sql)
            print_result(res)
            db.commit()

        except (SystemExit, KeyboardInterrupt, EOFError):
            print "\nBye!\n"
            return 0

        except Exception:
            traceback.print_exc()
            db.rollback()
Example #40
def connect_db(filename):

    db = SqlSoup('sqlite:///%s' % filename)

    return db
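A usage sketch for connect_db(); the file name and the `movies` table below are assumptions for illustration:

db = connect_db("movies.db")
# Once SqlSoup reflects the schema, tables are exposed as attributes on the soup.
for movie in db.movies.all():
    print(movie.id)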
Example #41
def migrateVersion2():
    log.info('Upgrading DB to version 2.')

    # for some normal executions
    db = SqlSoup(engine)

    # Remove not used table
    try:
        db.execute('DROP TABLE Feed')
        log.info('Removed old Feed table.')
    except (OperationalError, NoSuchTableError):
        log.debug('No Feed table found.')

    # History add column
    try:
        db.execute('DROP TABLE History')
        log.info('Removed History table.')
    except (OperationalError, NoSuchTableError):
        log.debug('No History table found.')

    # RenameHistory add column
    try:
        Session.query(RenameHistory).filter_by(movieQueue='').all()
        log.debug('Column "RenameHistory:movieQueue" exists, not necessary.')
    except (OperationalError, NoSuchTableError):
        db.execute("CREATE TABLE RenameHistoryBackup(id, movieId, old, new);")
        db.execute(
            "INSERT INTO RenameHistoryBackup SELECT id, movieId, old, new FROM RenameHistory;"
        )
        db.execute("DROP TABLE RenameHistory;")
        db.execute(
            "CREATE TABLE RenameHistory (id, movieQueue, old VARCHAR, new VARCHAR);"
        )
        db.execute(
            "INSERT INTO RenameHistory SELECT id, movieId, old, new FROM RenameHistoryBackup;"
        )
        db.execute("DROP TABLE RenameHistoryBackup;")
        log.info('Added "movieQueue" column to existing RenameHistory Table.')

    # Mark all history

    # Quality from string to int
    movies = Session.query(Movie).all()
    for movie in movies:

        # Add moviequeues
        log.info('Making Queue item for %s' % movie.name)
        queue = MovieQueue()
        queue.movieId = movie.id
        queue.qualityType = movie.quality if movie.quality else 'dvdrip'  #just for backup
        queue.order = 1
        queue.active = (movie.status != u'deleted')
        queue.completed = (movie.status != u'want')
        queue.markComplete = True
        Session.add(queue)
        Session.flush()

        log.info('Doing some stuff to RenameHistory')
        history = Session.query(RenameHistory).filter_by(
            movieQueue=movie.id).first()
        if history:
            history.movieQueue = queue.id
            queue.name = os.path.basename(os.path.dirname(history.old))
            Session.flush()

    Session.add(DbVersion(1))  # Add version 1 for teh nice
    Session.add(DbVersion(2))
    Session.flush()
Example #42
# -*- coding: utf-8 -*-

# This module takes care of the imports and functions
# used by the other modules.

import csv, os, re, types
from optparse import OptionParser
from time import localtime, strftime
from ConfigParser import SafeConfigParser
from decimal import Decimal
from sqlalchemy.ext.sqlsoup import SqlSoup
import MySQLdb

# Connect to the database
db = SqlSoup("mysql://*****:*****@localhost/cobranca")
connection = MySQLdb.connect(db='cobranca', user='******', passwd='caugm')
cursor = connection.cursor()

# Load the configuration
config = SafeConfigParser()
config.read("cobrador.ini")


def valor(valor, esconder_zero=True, largura=11):
    temp = list(("%.2f" % valor).replace('.', ','))
    v = temp.index(",")
    for i in xrange(v - 3, 0, -3):
        temp[i:i] = "."
    temp = "".join(temp)
    if temp == "0,00" and esconder_zero:
        temp = ""
Example #43
    def _setup_sqlalchemy(self, path):
        """Establish foreign key relationships in the database and
        fire-up sqlsoup"""
        Session = sessionmaker()
        self.engine = create_engine('sqlite:///%s' % path)
        self.meta = MetaData(self.engine)
        self.connection = self.engine.connect()
        self.session = Session(bind=self.connection)

        Groups = Table('Groups', self.meta, autoload=True)

        Documents = Table('Documents', self.meta, autoload=True)

        RemoteDocuments = Table('RemoteDocuments', self.meta,
                                Column('groupId', Integer,
                                       ForeignKey('Groups.id')),
                                Column('remoteId', Integer,
                                       primary_key=True),
                                Column('documentId', Integer,
                                       ForeignKey('Documents.id'),
                                       primary_key=True),
                                autoload=True)

        DocumentContributors = Table('DocumentContributors', self.meta,
                                     Column('documentId', Integer,
                                            ForeignKey('Documents.id')),
                                     autoload=True)

        DocumentUrls = Table('DocumentUrls', self.meta,
                             Column('documentId', Integer,
                                    ForeignKey('Documents.id'),
                                    primary_key=True),
                             autoload=True)

        DocumentFolders = Table('DocumentFolders', self.meta,
                            Column('documentId', Integer,
                                   ForeignKey('Documents.id'),
                                   primary_key=True),
                            Column('folderId', Integer,
                                   ForeignKey('Folders.id'),
                                   primary_key=True),
                            autoload=True)

        DocumentTags = Table('DocumentTags', self.meta,
                            Column('documentId', Integer,
                                   ForeignKey('Documents.id'),
                                   primary_key=True),
                            Column('tag', String,
                                    primary_key=True),
                            autoload=True)

        Folders = Table('Folders', self.meta,
                        autoload=True)

        self.db = SqlSoup(self.meta)

        self.db.Documents.relate('contributors', self.db.DocumentContributors, lazy=True)
        self.db.Documents.relate('url', self.db.DocumentUrls)
        self.db.Documents.relate('tags', self.db.DocumentTags)

        self.f_doc_alive = and_(self.db.Documents.onlyReference != 1,
                                self.db.Documents.deletionPending != 1)
Example #44
    def daemon_loop(self, db_connection_url):
        """Main processing loop (not currently used).

        :param db_connection_url: database connection information, kept in case we need to reconnect
        """
        old_local_bindings = {}
        old_vif_ports = {}
        self.db_connected = False

        while True:
            if not self.db_connected:
                time.sleep(self.reconnect_interval)
                db = SqlSoup(db_connection_url)
                self.db_connected = True
                LOG.info('Connecting to database "%s" on %s' % (db.engine.url.database, db.engine.url.host))

            # Get bindings from db.
            all_bindings = self.get_db_port_bindings(db)
            if not self.db_connected:
                continue
            all_bindings_vif_port_ids = set(all_bindings.keys())
            lsw_id_bindings = self.get_db_vlan_bindings(db)
            if not self.db_connected:
                continue

            # Get bindings from OVS bridge.
            vif_ports = self.int_br.get_vif_ports()
            new_vif_ports = dict([(p.vif_id, p) for p in vif_ports])
            new_vif_ports_ids = set(new_vif_ports.keys())

            old_vif_ports_ids = set(old_vif_ports.keys())
            dead_vif_ports_ids = new_vif_ports_ids - all_bindings_vif_port_ids
            dead_vif_ports = [new_vif_ports[p] for p in dead_vif_ports_ids]
            disappeared_vif_ports_ids = old_vif_ports_ids - new_vif_ports_ids
            new_local_bindings_ids = all_bindings_vif_port_ids.intersection(new_vif_ports_ids)
            new_local_bindings = dict([(p, all_bindings.get(p)) for p in new_vif_ports_ids])
            new_bindings = set((p, old_local_bindings.get(p), new_local_bindings.get(p)) for p in new_vif_ports_ids)
            changed_bindings = set([b for b in new_bindings if b[2] != b[1]])

            LOG.debug("all_bindings: %s" % all_bindings)
            LOG.debug("lsw_id_bindings: %s" % lsw_id_bindings)
            LOG.debug("old_vif_ports_ids: %s" % old_vif_ports_ids)
            LOG.debug("dead_vif_ports_ids: %s" % dead_vif_ports_ids)
            LOG.debug("old_vif_ports_ids: %s" % old_vif_ports_ids)
            LOG.debug("new_local_bindings_ids: %s" % new_local_bindings_ids)
            LOG.debug("new_local_bindings: %s" % new_local_bindings)
            LOG.debug("new_bindings: %s" % new_bindings)
            LOG.debug("changed_bindings: %s" % changed_bindings)

            # Take action.
            for p in dead_vif_ports:
                LOG.info("No quantum binding for port " + str(p) + "putting on dead vlan")
                self.port_dead(p)

            for b in changed_bindings:
                port_id, old_port, new_port = b
                p = new_vif_ports[port_id]
                if old_port:
                    old_net_uuid = old_port.network_id
                    LOG.info("Removing binding to net-id = " + old_net_uuid + " for " + str(p) + " added to dead vlan")
                    self.port_unbound(p, old_net_uuid)
                    if not new_port:
                        self.port_dead(p)

                if new_port:
                    new_net_uuid = new_port.network_id
                    if new_net_uuid not in lsw_id_bindings:
                        LOG.warn("No ls-id binding found for net-id '%s'" % new_net_uuid)
                        continue

                    lsw_id = lsw_id_bindings[new_net_uuid]
                    try:
                        self.port_bound(p, new_net_uuid, lsw_id)
                        LOG.info(
                            "Port "
                            + str(p)
                            + " on net-id = "
                            + new_net_uuid
                            + " bound to "
                            + str(self.local_vlan_map[new_net_uuid])
                        )
                    except Exception as e:
                        LOG.info(
                            "Unable to bind Port "
                            + str(p)
                            + " on netid = "
                            + new_net_uuid
                            + " to "
                            + str(self.local_vlan_map[new_net_uuid])
                        )

            for vif_id in disappeared_vif_ports_ids:
                LOG.info("Port Disappeared: " + vif_id)
                old_port = old_local_bindings.get(vif_id)
                if old_port:
                    try:
                        self.port_unbound(old_vif_ports[vif_id], old_port.network_id)
                    except Exception:
                        LOG.info("Unable to unbind Port " + str(p) + " on net-id = " + old_port.network_uuid)

            old_vif_ports = new_vif_ports
            old_local_bindings = new_local_bindings
            try:
                db.commit()
            except Exception as e:
                LOG.info("Unable to commit to database! Exception: %s" % e)
                db.rollback()
                old_local_bindings = {}
                old_vif_ports = {}

            time.sleep(self.polling_interval)
Example #45
    def daemon_loop(self, db_connection_url):
        self.local_vlan_map = {}
        old_local_bindings = {}
        old_vif_ports = {}
        db_connected = False

        while True:
            if not db_connected:
                time.sleep(self.reconnect_interval)
                db = SqlSoup(db_connection_url)
                db_connected = True
                LOG.info('Connecting to database "%s" on %s' % (db.engine.url.database, db.engine.url.host))

            all_bindings = {}
            try:
                ports = db.ports.all()
            except Exception as e:
                LOG.info("Unable to get port bindings! Exception: %s" % e)
                db_connected = False
                continue

            for port in ports:
                all_bindings[port.interface_id] = port

            vlan_bindings = {}
            try:
                vlan_binds = db.vlan_bindings.all()
            except Exception as e:
                LOG.info("Unable to get vlan bindings! Exception: %s" % e)
                db_connected = False
                continue

            for bind in vlan_binds:
                vlan_bindings[bind.network_id] = bind.vlan_id

            new_vif_ports = {}
            new_local_bindings = {}
            vif_ports = self.int_br.get_vif_ports()
            for p in vif_ports:
                new_vif_ports[p.vif_id] = p
                if p.vif_id in all_bindings:
                    net_id = all_bindings[p.vif_id].network_id
                    new_local_bindings[p.vif_id] = net_id
                else:
                    # no binding, put him on the 'dead vlan'
                    self.int_br.set_db_attribute("Port", p.port_name, "tag", DEAD_VLAN_TAG)
                    self.int_br.add_flow(priority=2, match="in_port=%s" % p.ofport, actions="drop")

                old_b = old_local_bindings.get(p.vif_id, None)
                new_b = new_local_bindings.get(p.vif_id, None)

                if old_b != new_b:
                    if old_b is not None:
                        LOG.info("Removing binding to net-id = %s for %s" % (old_b, str(p)))
                        self.port_unbound(p, True)
                        if p.vif_id in all_bindings:
                            all_bindings[p.vif_id].op_status = OP_STATUS_DOWN
                    if new_b is not None:
                        # If we don't have a binding we have to stick it on
                        # the dead vlan
                        net_id = all_bindings[p.vif_id].network_id
                        vlan_id = vlan_bindings.get(net_id, DEAD_VLAN_TAG)
                        self.port_bound(p, vlan_id)
                        if p.vif_id in all_bindings:
                            all_bindings[p.vif_id].op_status = OP_STATUS_UP
                        LOG.info(("Adding binding to net-id = %s " "for %s on vlan %s") % (new_b, str(p), vlan_id))

            for vif_id in old_vif_ports:
                if vif_id not in new_vif_ports:
                    LOG.info("Port Disappeared: %s" % vif_id)
                    if vif_id in old_local_bindings:
                        old_b = old_local_bindings[vif_id]
                        self.port_unbound(old_vif_ports[vif_id], False)
                    if vif_id in all_bindings:
                        all_bindings[vif_id].op_status = OP_STATUS_DOWN

            old_vif_ports = new_vif_ports
            old_local_bindings = new_local_bindings
            try:
                db.commit()
            except Exception as e:
                LOG.info("Unable to commit to database! Exception: %s" % e)
                db.rollback()
                old_local_bindings = {}
                old_vif_ports = {}

            time.sleep(self.polling_interval)
Example #46
    def __init__(self, data_schema, db_url, metadata_table_name, scraper=None):
        self.scraper = scraper
        self.their_fields = copy.deepcopy(data_schema.their_fields)
        self.resolutions = data_schema.resolutions
        self.db_url = db_url

        self.our_fields = {
            'page_permalink': {
                'column': Column(String(1000, convert_unicode=True)),
            },
            'access_time': {
                'column': Column(Integer),
            },
            'doesnt_exist': {
                'column': Column(Boolean),
            },
            'we_couldnt_parse_it': {
                'column': Column(Boolean),
            },
            #'is_color = Column(Boolean)
        }

        resolutions_columns = []
        for resolution, data in self.resolutions.items():
            resolutions_columns.append((data['status_column_name'], {
                'column':
                Column(Boolean, default=False)
            }))
            resolutions_columns.append((data['url_column_name'], {
                'column':
                Column(String(1000, convert_unicode=True))
            }))
            resolutions_columns.append((data['too_big_column_name'], {
                'column':
                Column(Boolean, default=False)
            }))
        self.our_fields.update(dict(resolutions_columns))

        def column_type_to_column_obj(type):
            if type == 'string':
                return Column(Text(9000, convert_unicode=True))
            else:
                print "what the heck kind of type is that?!?!?!?"

        for index in self.their_fields.keys():
            self.their_fields[index]['column'] = column_type_to_column_obj(
                self.their_fields[index]['type'])

        ## glue all of the fields together
        self.all_fields = dict(self.their_fields.items() +
                               self.our_fields.items())

        ## generate the metadata class
        self.base = declarative_base()

        class OurMetadata(self.base):
            __tablename__ = data_schema.table_name
            id = Column(Integer, primary_key=True)

        for fieldname, fieldinfo in self.all_fields.items():
            setattr(OurMetadata, fieldname, fieldinfo['column'])

        ## create the db
        #self.db = SqlSoup(db_url + '?charset=utf8&use_unicode=0', expire_on_commit=True)
        from sqlalchemy.orm import scoped_session, sessionmaker
        self.db = SqlSoup(db_url + '?charset=utf8&use_unicode=0',
                          session=scoped_session(
                              sessionmaker(expire_on_commit=True)))
        self.db.engine.raw_connection().connection.text_factory = unicode

        # make the tables if they don't already exist
        self.base.metadata.create_all(self.db.engine)
        self.db.commit()

        # make it easier to grab metadata table object
        self.metadata_table = getattr(self.db, metadata_table_name)
        if not self.metadata_table:
            print "crap, something has gone really wrong. couldn't grab the metadata table"

        #TODO: i think that maybe i can remove this. but not sure. probs need for sqlite.
        self.db_lock = threading.Lock()
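The core trick in this constructor is attaching runtime-chosen Columns to a declarative class with setattr() before create_all(). A minimal, self-contained sketch of just that part; the table and column names here are hypothetical, and sqlite keeps it runnable:

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class ExampleMetadata(Base):
    __tablename__ = 'metadata_example'
    id = Column(Integer, primary_key=True)

# columns decided at runtime, exactly as the loop over all_fields does above
for fieldname in ('page_permalink', 'notes'):
    setattr(ExampleMetadata, fieldname, Column(String(1000)))

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)   # both runtime columns end up in the table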
Example #47
0
    options, args = parser.parse_args()

    if options.verbose:
        LOG.basicConfig(level=LOG.DEBUG)
    else:
        LOG.basicConfig(level=LOG.WARN)

    if len(args) != 1:
        parser.print_help()
        sys.exit(1)

    config_file = args[0]
    config = ConfigParser.ConfigParser()
    try:
        config.read(config_file)
    except Exception as e:
        LOG.error("Unable to parse config file \"%s\": %s" %
                  (config_file, str(e)))

    integ_br = config.get("OVS", "integration-bridge")

    options = {"sql_connection": config.get("DATABASE", "sql_connection")}
    db = SqlSoup(options["sql_connection"])

    LOG.info("Connecting to database \"%s\" on %s" %
             (db.engine.url.database, db.engine.url.host))
    plugin = OVSQuantumAgent(integ_br)
    plugin.daemon_loop(db)

    sys.exit(0)
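For reference, a hedged example of the config file this main() expects; the section and option names come from the snippet above, while the bridge name and connection string are made-up values:

import ConfigParser
import StringIO

sample = """
[OVS]
integration-bridge = br-int

[DATABASE]
sql_connection = mysql://quantum:secret@127.0.0.1/ovs_quantum
"""

config = ConfigParser.ConfigParser()
config.readfp(StringIO.StringIO(sample))
print(config.get("OVS", "integration-bridge"))    # -> br-int
print(config.get("DATABASE", "sql_connection"))   # -> the SqlSoup URL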
Example #48
0
File: upgrade.py Project: wookiesh/domogik
 def __init__(self, engine):
     self.__db = SqlSoup(engine)
Example #49
0
def connect(url):
    global db
    db = SqlSoup(url)

    global goodNameClause
    goodNameClause = db.machines.is_active == 1
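A hedged usage sketch of the snippet above: the `machines` table and its `is_active` column come from the example, while the connection URL and the `hostname` column are assumptions.

connect("mysql://user:secret@localhost/inventory")   # assumed URL

# goodNameClause is a plain SQLAlchemy boolean expression, so it can be
# passed straight to filter()
for machine in db.machines.filter(goodNameClause).all():
    print(machine.hostname)                          # assumed column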
Example #50
0
class MySQLBind9Backend(base.Backend):
    __plugin_name__ = 'mysqlbind9'

    def get_url_data(self):
        url = _parse_rfc1738_args(cfg.CONF[self.name].database_connection)
        return url.translate_connect_args()

    def get_dns_table(self, table=None):
        """
        Get a Table object from SQLSoup

        :param table: Overridable table name
        """
        table = table or cfg.CONF[self.name].database_dns_table
        return getattr(self._db, table)

    def start(self):
        super(MySQLBind9Backend, self).start()

        if cfg.CONF[self.name].write_database:
            self._engine = get_engine(self.name)
            self._db = SqlSoup(self._engine)

        self._sync_domains()

    def _add_soa_record(self, domain, servers):
        """
        add the single SOA record for this domain. Must create the
        data from attributes of the domain
        """
        table = self.get_dns_table()
        data_rec = "%s. %s. %d %d %d %d %d" % (
            servers[0]['name'], domain['email'].replace(
                "@", "."), domain['serial'], domain['refresh'],
            domain['retry'], domain['expire'], domain['minimum'])

        # use the domain id for records that don't have a match
        # in designate's records table
        table.insert(tenant_id=domain['tenant_id'],
                     domain_id=domain['id'],
                     designate_rec_id=domain['id'],
                     name=domain['name'],
                     ttl=domain['ttl'],
                     type='SOA',
                     data=data_rec)
        self._db.commit()

    def _add_ns_records(self, domain, servers):
        """
        add the NS records, one for each server, for this domain
        """
        table = self.get_dns_table()

        # use the domain id for records that don't have a match
        # in designate's records table
        for server in servers:
            table.insert(tenant_id=domain['tenant_id'],
                         domain_id=domain['id'],
                         designate_rec_id=domain['id'],
                         name=domain['name'],
                         ttl=domain['ttl'],
                         type='NS',
                         data=server['name'])

        self._db.commit()

    def _insert_db_record(self, tenant_id, domain_id, record):
        """
        generic db insertion method for a domain record
        """
        table = self.get_dns_table()
        table.insert(tenant_id=tenant_id,
                     domain_id=domain_id,
                     designate_rec_id=record['id'],
                     name=record['name'],
                     ttl=record['ttl'],
                     type=record['type'],
                     data=record['data'])
        self._db.commit()

    def _update_ns_records(self, domain, servers):
        """
        delete and re-add all NS records : easier to just delete all
        NS records and then replace - in the case of adding new NS
        servers
        """
        table = self.get_dns_table()

        all_ns_rec = table.filter_by(tenant_id=domain['tenant_id'],
                                     domain_id=domain['id'],
                                     type=u'NS')

        # delete all NS records
        all_ns_rec.delete()
        # add all NS records (might have new servers)
        self._db.commit()

        self._add_ns_records(domain, servers)

    def _update_db_record(self, tenant_id, record):
        """
        generic domain db record update method
        """
        table = self.get_dns_table()

        q = table.filter_by(tenant_id=tenant_id,
                            domain_id=record['domain_id'],
                            designate_rec_id=record['id'])

        q.update({
            'ttl': record['ttl'],
            'type': record['type'],
            'data': record['data']
        })

        self._db.commit()

    def _update_soa_record(self, domain, servers):
        """
        update the one single SOA record for the domain
        """
        LOG.debug("_update_soa_record()")
        table = self.get_dns_table()

        # there will only ever be -one- of these
        existing_record = table.filter_by(tenant_id=domain['tenant_id'],
                                          domain_id=domain['id'],
                                          type=u'SOA')

        data_rec = "%s. %s. %d %d %d %d %d" % (
            servers[0]['name'], domain['email'].replace(
                "@", "."), domain['serial'], domain['refresh'],
            domain['retry'], domain['expire'], domain['minimum'])

        existing_record.update({
            'ttl': domain['ttl'],
            'type': u'SOA',
            'data': data_rec
        })

        self._db.commit()

#    def _update_domain_ttl(self, domain):
#        LOG.debug("_update_soa_record()")
#        table = self.get_dns_table()
#
#        # there will only ever be -one- of these
#        domain_records = table.filter_by(domain_id=domain['id'])
#
#        domain_records.update({'ttl': domain['ttl']})
#
#        self._db.commit()

    def _delete_db_record(self, tenant_id, record):
        """
        delete a specific record for a given domain
        """
        table = self.get_dns_table()
        LOG.debug("_delete_db_record")

        q = table.filter_by(tenant_id=tenant_id,
                            domain_id=record['domain_id'],
                            designate_rec_id=record['id'])

        q.delete()

        self._db.commit()

    def _delete_db_domain_records(self, tenant_id, domain_id):
        """
         delete all records for a given domain
         """
        LOG.debug('_delete_db_domain_records()')
        table = self.get_dns_table()

        # delete all records for the domain id
        q = table.filter_by(tenant_id=tenant_id, domain_id=domain_id)
        q.delete()

        self._db.commit()

    def create_domain(self, context, domain):
        LOG.debug('create_domain()')

        if cfg.CONF[self.name].write_database:
            servers = self.central_service.find_servers(self.admin_context)

            self._add_soa_record(domain, servers)
            self._add_ns_records(domain, servers)

        self._sync_domains()

    def update_domain(self, context, domain):
        LOG.debug('update_domain()')

        if cfg.CONF[self.name].write_database:
            servers = self.central_service.find_servers(self.admin_context)

            self._update_soa_record(domain, servers)
            self._update_ns_records(domain, servers)

    def delete_domain(self, context, domain):
        LOG.debug('delete_domain()')

        if cfg.CONF[self.name].write_database:
            self._delete_db_domain_records(domain['tenant_id'], domain['id'])

        self._sync_domains()

    def create_server(self, context, server):
        LOG.debug('create_server()')

        raise exceptions.NotImplemented('create_server() for '
                                        'mysqlbind9 backend is '
                                        'not implemented')
        """
        TODO: this first-cut will not scale. Use bulk SQLAlchemy (core) queries
        if cfg.CONF[self.name].write_database:
            domains = self.central_service.find_domains(self.admin_context)

            for domain in domains:
                self._add_ns_records(domain, server)

        self._sync_domains()
        """

#   This method could be very expensive and should only be called
#   (e.g., from central) if the name of an existing server is
#   changed.

    def update_server(self, context, server):
        LOG.debug('update_server()')

        raise exceptions.NotImplemented('update_server() for '
                                        'mysqlbind9 backend is '
                                        'not implemented')
        """
        TODO: this first-cut will not scale. Use bulk SQLAlchemy (core) queries
        if cfg.CONF[self.name].write_database:
            servers = self.central_service.find_servers(self.admin_context)
            domains = self.central_service.find_domains(self.admin_context)

            for domain in domains:
                self._update_ns_records(domain, servers)

        self._sync_domains()
        """

    def delete_server(self, context, server):
        LOG.debug('delete_server()')

        raise exceptions.NotImplemented('delete_server() for '
                                        'mysqlbind9 backend is'
                                        ' not implemented')
        """
        TODO: For scale, Use bulk SQLAlchemy (core) queries
        """

    def create_record(self, context, domain, record):
        LOG.debug('create_record()')
        if cfg.CONF[self.name].write_database:
            self._insert_db_record(domain['tenant_id'], domain['id'], record)

    def update_record(self, context, domain, record):
        LOG.debug('update_record()')
        if cfg.CONF[self.name].write_database:
            self._update_db_record(domain['tenant_id'], record)

    def delete_record(self, context, domain, record):
        LOG.debug('Delete Record')
        if cfg.CONF[self.name].write_database:
            self._delete_db_record(domain['tenant_id'], record)

    def _sync_domains(self):
        """
        Update the zone file and reconfig rndc to update bind.
        Unike regular bind, this only needs to be done upon adding
        or deleting domains as mysqlbind takes care of updating
        bind upon regular record changes
        """
        LOG.debug('Synchronising domains')

        domains = self.central_service.find_domains(self.admin_context)

        output_folder = os.path.join(os.path.abspath(cfg.CONF.state_path),
                                     'bind9')

        # Create the output folder tree if necessary
        if not os.path.exists(output_folder):
            os.makedirs(output_folder)

        output_path = os.path.join(output_folder, 'zones.config')

        abs_state_path = os.path.abspath(cfg.CONF.state_path)

        LOG.debug("Getting ready to write zones.config at %s" % output_path)

        # NOTE(CapTofu): Might have to adapt this later on?
        url = self.get_url_data()
        utils.render_template_to_file(
            'mysql-bind9-config.jinja2',
            output_path,
            domains=domains,
            state_path=abs_state_path,
            dns_server_type=cfg.CONF[self.name].dns_server_type,
            dns_db_schema=url['database'],
            dns_db_table=cfg.CONF[self.name].database_dns_table,
            dns_db_host=url['host'],
            dns_db_user=url['username'],
            dns_db_password=url['password'])

        # only do this if domain create, domain delete
        rndc_call = [
            'rndc',
            '-s',
            cfg.CONF[self.name].rndc_host,
            '-p',
            str(cfg.CONF[self.name].rndc_port),
        ]

        if cfg.CONF[self.name].rndc_config_file:
            rndc_call.extend(['-c', self.config.rndc_config_file])

        if cfg.CONF[self.name].rndc_key_file:
            rndc_call.extend(['-k', self.config.rndc_key_file])

        rndc_call.extend(['reconfig'])

        utils.execute(*rndc_call)
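To make the SOA format string in _add_soa_record() / _update_soa_record() above concrete, here is a small worked example of the `data` value it builds; the server name, email and timer values are made up:

servers = [{'name': 'ns1.example.org'}]
domain = {'email': 'admin@example.org', 'serial': 1401234567,
          'refresh': 3600, 'retry': 600, 'expire': 86400, 'minimum': 3600}

data_rec = "%s. %s. %d %d %d %d %d" % (
    servers[0]['name'], domain['email'].replace("@", "."), domain['serial'],
    domain['refresh'], domain['retry'], domain['expire'], domain['minimum'])

print(data_rec)
# -> ns1.example.org. admin.example.org. 1401234567 3600 600 86400 3600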
Example #51
0
class DB:
    ##### INSTANTIATION
    def __init__(self, data_schema, db_url, metadata_table_name, scraper=None):
        self.scraper = scraper
        self.their_fields = copy.deepcopy(data_schema.their_fields)
        self.resolutions = data_schema.resolutions
        self.db_url = db_url

        self.our_fields = {
            'page_permalink': {
                'column': Column(String(1000, convert_unicode=True)),
            },
            'access_time': {
                'column': Column(Integer),
            },
            'doesnt_exist': {
                'column': Column(Boolean),
            },
            'we_couldnt_parse_it': {
                'column': Column(Boolean),
            },
            #'is_color = Column(Boolean)
        }

        resolutions_columns = []
        for resolution, data in self.resolutions.items():
            resolutions_columns.append((data['status_column_name'], {
                'column':
                Column(Boolean, default=False)
            }))
            resolutions_columns.append((data['url_column_name'], {
                'column':
                Column(String(1000, convert_unicode=True))
            }))
            resolutions_columns.append((data['too_big_column_name'], {
                'column':
                Column(Boolean, default=False)
            }))
        self.our_fields.update(dict(resolutions_columns))

        def column_type_to_column_obj(type):
            if type == 'string':
                return Column(Text(9000, convert_unicode=True))
            else:
                print "what the heck kind of type is that?!?!?!?"

        for index in self.their_fields.keys():
            self.their_fields[index]['column'] = column_type_to_column_obj(
                self.their_fields[index]['type'])

        ## glue all of the fields together
        self.all_fields = dict(self.their_fields.items() +
                               self.our_fields.items())

        ## generate the metadata class
        self.base = declarative_base()

        class OurMetadata(self.base):
            __tablename__ = data_schema.table_name
            id = Column(Integer, primary_key=True)

        for fieldname, fieldinfo in self.all_fields.items():
            setattr(OurMetadata, fieldname, fieldinfo['column'])

        ## create the db
        #self.db = SqlSoup(db_url + '?charset=utf8&use_unicode=0', expire_on_commit=True)
        from sqlalchemy.orm import scoped_session, sessionmaker
        self.db = SqlSoup(db_url + '?charset=utf8&use_unicode=0',
                          session=scoped_session(
                              sessionmaker(expire_on_commit=True)))
        self.db.engine.raw_connection().connection.text_factory = unicode

        # make the tables if they don't already exist
        self.base.metadata.create_all(self.db.engine)
        self.db.commit()

        # make it easier to grab metadata table object
        self.metadata_table = getattr(self.db, metadata_table_name)
        if not self.metadata_table:
            print "crap, something has gone really wrong. couldn't grab the metadata table"

        #TODO: i think that maybe i can remove this. but not sure. probs need for sqlite.
        self.db_lock = threading.Lock()

    ### SERIALIZATION

    # serialize data before putting it in the database
    def prep_data_for_insertion(self, data_dict):
        if not data_dict:
            return data_dict
        for key, data in data_dict.items():
            if key in self.all_fields and 'serialize' in self.all_fields[
                    key] and self.all_fields[key]['serialize']:
                data_dict[key] = json.dumps(data_dict[key])
        return data_dict

    # de-serialize and decode to unicode the data after pulling it from the database
    def re_objectify_data(self, data_dict):
        if not data_dict:
            return data_dict
        data_dict['id'] = int(data_dict['id'])
        for key, data in data_dict.items():
            if key in self.all_fields and 'serialize' in self.all_fields[
                    key] and self.all_fields[key]['serialize']:
                if data_dict[key]:
                    data_dict[key] = json.loads(data_dict[key])
            else:
                data_dict[key] = to_unicode_or_bust(data_dict[key])
        return data_dict

    ##### READ-ONLY OPERATIONS

    ### BASE: Actually grab things from the database.
    ### many of the below functions use these
    # NOTE: careful about using this directly. it doesn't "uncompress" the data after pulling it from the db
    def get_image_metadata(self, id):
        #with self.db_lock:
        return self.metadata_table.get(id)

    def get_image_metadata_dict(self, id):
        # run this through dict() so that we're manipulating a copy, not the
        # mapped object itself, which SQLAlchemy keeps cached in the session's identity map
        row = self.get_image_metadata(id)
        if not row:
            return None
        row_dict = dict(row.__dict__)
        objectified_dict = self.re_objectify_data(row_dict)
        del objectified_dict[
            '_sa_instance_state']  # sqlalchemy throws this sucker in. dont want it.
        return objectified_dict

    def get_resolution_url_column_name(self, resolution):
        return self.resolutions[resolution]['url_column_name']

    def get_resolution_url_column(self, resolution):
        column_name = self.get_resolution_url_column_name(resolution)
        return getattr(self.db, column_name)

    def get_resolution_url(self, resolution, id):
        row = self.metadata_table.get(id)
        url_column_name = self.get_resolution_url_column_name(resolution)
        return getattr(row, url_column_name)

    #TODO: pretty sure these are the same function
    def get_resolution_image_url(self, id, resolution):
        metadata_url_column_name = self.resolutions[resolution][
            'url_column_name']
        url = getattr(self.metadata_table.get(id), metadata_url_column_name)
        return url

    def get_resolution_status_column_name(self, resolution):
        return self.resolutions[resolution]['status_column_name']

    def get_resolution_status_column(self, resolution):
        the_status_column_name = self.get_resolution_status_column_name(
            resolution)
        the_status_column = getattr(self.metadata_table,
                                    the_status_column_name)
        return the_status_column

    def get_resolution_status(self, id, resolution):
        dict = self.get_image_metadata_dict(id)
        column_name = self.get_resolution_status_column_name(resolution)
        return dict.get(column_name)

    def get_resolution_too_big_column_name(self, resolution):
        return self.resolutions[resolution]['too_big_column_name']

    def get_resolution_too_big_column(self, resolution):
        column_name = self.get_resolution_too_big_column_name(resolution)
        column = getattr(self.metadata_table, column_name)
        return column

    def get_is_marked_as_too_big(self, id, resolution):
        dict = self.get_image_metadata_dict(id)
        too_big_column_name = self.get_resolution_too_big_column_name(
            resolution)
        if dict[too_big_column_name]:
            return True
        return False

    def get_valid_images(self):
        criteria = []
        for resolution in self.resolutions.keys():
            criteria.append(
                self.get_resolution_status_column(resolution) == True)
        where = sqlalchemy.or_(*criteria)
        return self.metadata_table.filter(where)

    def get_next_successful_image_id(self, id):
        where = self.metadata_table.id > id
        higher_id = self.get_valid_images().filter(where).first()
        if not higher_id:
            return id
        retval = int(higher_id.id)
        return retval

    def get_prev_successful_image_id(self, id):
        where = self.metadata_table.id < id
        lower_id = self.get_valid_images().filter(where).order_by(
            sqlalchemy.desc(self.metadata_table.id)).first()
        if not lower_id:
            return id
        retval = int(lower_id.id)
        return retval

    ## input: resolution, as a string (hires, lores, thumb)
    ## returns: list of tuples in form: (id, url)
    def get_set_images_to_dl(self, resolution):
        the_status_column = self.get_resolution_status_column(resolution)
        where = sqlalchemy.or_(the_status_column == False,
                               the_status_column == None)
        #rows_to_dl = self.metadata_table.filter(where).filter(sqlalchemy.not_(self.get_resolution_too_big_column(resolution) == True)).all()
        rows_to_dl = self.metadata_table.filter(where).all()
        ids_to_dl = map(lambda row: row.id, rows_to_dl)
        metadata_url_column_name = self.get_resolution_url_column_name(
            resolution)
        tuples = map(
            lambda id:
            (id, getattr(self.metadata_table.get(id), metadata_url_column_name)
             ), ids_to_dl)
        # throw away tuples that have a null value in either position
        # TODO: maybe we should throw an exception here?
        tuples = filter(lambda tuple: tuple[0] and tuple[1], tuples)
        return tuples

    def get_highest_id_in_our_db(self):
        try:
            id = int(
                self.metadata_table.order_by(
                    sqlalchemy.desc(self.metadata_table.id)).first().id)
        except:
            id = 1
        return id

    def get_random_valid_image_id(self):
        possibilities = self.get_valid_images()
        num_possibilities = possibilities.count()
        choice = random.randrange(num_possibilities)
        return possibilities[choice].id

    def get_num_images(self):
        # yeah, the below where statement really sucks
        # i can't just filter by != True. it returns 0 results. i don't know why.
        mywhere = sqlalchemy.or_(
            self.metadata_table.we_couldnt_parse_it == False,
            self.metadata_table.we_couldnt_parse_it == None)
        return self.metadata_table.filter(mywhere).count()

    ##### WRITE-ONLY OPERATIONS

    ### BASE: Actually insert or update a row in the database
    ### many of the below functions use these
    #NOTE: this only works if the primary key is 'id'
    def insert_or_update_table_row(self, table, new_data_dict):
        if not new_data_dict:
            print "you're trying to insert a blank dict. that's pretty lame."
            return False
        # merge the new and the old into a fresh dict
        existing_row = table.get(new_data_dict['id'])
        if existing_row:
            existing_row_data_dict = existing_row.__dict__
            final_row_data_dict = existing_row_data_dict
            for key, value in new_data_dict.items():
                final_row_data_dict[key] = value
            #write over the current row contents with it
            #with self.db_lock:
            self.db.delete(existing_row)
            self.db.commit()
        else:
            final_row_data_dict = new_data_dict
        #with self.db_lock:
        table.insert(**final_row_data_dict)
        self.db.commit()

    def store_metadata_row(self, metadata_dict):
        if not metadata_dict.has_key('we_couldnt_parse_it'):
            metadata_dict['we_couldnt_parse_it'] = 0
        metadata_dict = self.prep_data_for_insertion(metadata_dict)
        self.insert_or_update_table_row(self.metadata_table, metadata_dict)

    def mark_img_as_not_downloaded(self, id, resolution):
        status_column_name = self.get_resolution_status_column_name(resolution)
        data = {}
        data['id'] = id
        data[status_column_name] = False
        self.store_metadata_row(data)

    def mark_img_as_downloaded(self, id, resolution):
        status_column_name = self.get_resolution_status_column_name(resolution)
        data = {}
        data['id'] = id
        data[status_column_name] = True
        self.store_metadata_row(data)

    def mark_img_as_too_big(self, id, resolution):
        status_column_name = self.get_resolution_too_big_column_name(
            resolution)
        data = {}
        data['id'] = id
        data[status_column_name] = True
        self.store_metadata_row(data)

    # DELETE EVERYTHING. CAREFUL!
    def truncate_all_tables(self):
        print "================================"
        print "LIKE SERIOUSLY I AM ABOUT TO DELETE ALL THE TABLES RIGHT NOW OH BOY"
        print self.db_url
        print "================================"
        meta = MetaData(self.db.engine)
        meta.reflect()
        meta.drop_all()
        meta.create_all()
        '''
        for table in reversed(self.base.metadata.sorted_tables):
            print table
            table.delete()
            self.db.commit()
        self.base.metadata.create_all(self.db.engine)
        self.db.commit()
        '''

    ### HELPERS

    def get_field_key_by_full_name(self, full_name):
        for key, data in self.their_fields.items():
            if not data['full_name']:
                continue
            if data['full_name'] == full_name:
                return key
        return False

    ##### OTHER
    def repr_as_html(self, image_as_dict,
                     image_resolution_to_local_file_location_fxn):
        if not image_as_dict:
            return u""
        floorified = usable_image_scraper.scraper.floorify(image_as_dict['id'])
        id_zfilled = str(image_as_dict['id']).zfill(5)
        image_urls = {}
        for resolution in self.resolutions:
            image_urls[
                resolution] = image_resolution_to_local_file_location_fxn(
                    resolution)

        # add link rel=license
        #image_as_dict['copyright'] = image_as_dict['copyright'].strip("'").replace('None', '<a href="http://creativecommons.org/licenses/publicdomain/" rel="license">None</a>')

        image_as_dict['next_id'] = int(image_as_dict['id']) + 1
        image_as_dict['prev_id'] = int(image_as_dict['id']) - 1

        image_as_dict['their_data'] = u''
        for key, data in self.their_fields.items():
            if not key in image_as_dict or not image_as_dict[key]:
                continue
            html_block = '<p class="datapoint">'
            # if there's a pre-perscribed way to represent this field:
            html_block += '<label for="' + key + '">' + self.their_fields[key][
                'full_name'] + ': </label>'
            rdfa_clause = ''
            if 'dc_mapping' in data:
                rdfa_clause = ' property="' + data['dc_mapping'] + '"'
            if 'repr_as_html' in data:
                html_block += data['repr_as_html'](image_as_dict[key])
            # if not:
            else:
                html_block += '<span id="' + key + '"' + rdfa_clause + '>' + unicode(
                    image_as_dict[key]) + '</span>'
            html_block += '</p>'
            image_as_dict['their_data'] += html_block

        def get_template_str():
            template_file = 'django_template.html'
            path = os.path.dirname(__file__)
            relpath = os.path.relpath(path)
            template_relpath = relpath + '/' + template_file
            fp = open(template_relpath, 'r')
            template_as_str = fp.read()
            return template_as_str

        # the table of image downloads
        image_as_dict['download_links'] = u'<table id="download_links">'
        for resolution, data in self.scraper.resolutions.items():
            image_as_dict['download_links'] += u'<tr>'
            image_as_dict['download_links'] += u'<td>' + resolution + ':</td>'
            orig_url = self.get_resolution_url(resolution, image_as_dict['id'])
            #image_as_dict['download_links'] += u'<td><a href="' + orig_url + '">' + self.scraper.abbrev.upper() + '</a></td>'
            image_as_dict[
                'download_links'] += u'<td><a href="' + orig_url + '">Original</a></td>'
            # if we've downloaded the image
            if self.get_resolution_status(image_as_dict['id'], resolution):
                our_url = self.scraper.get_web_resolution_local_image_location(
                    resolution, image_as_dict['id'], remote_url=orig_url)
                image_as_dict[
                    'download_links'] += u'<td><a href="' + our_url + '">Usable Image Mirror</a></td>'
            else:
                image_as_dict['download_links'] += u'<td></td>'
        image_as_dict['download_links'] += u'</table>'

        template_str = get_template_str()
        template = Template(template_str)
        context = Context({'image': image_as_dict, 'image_urls': image_urls})
        html = template.render(context)
        return html
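The prep_data_for_insertion() / re_objectify_data() pair above boils down to a JSON round-trip for any field flagged with 'serialize'. A tiny standalone illustration; the field names are hypothetical:

import json

serialized_fields = set(['tags'])            # stands in for fields marked 'serialize'
row = {'id': 7, 'title': u'Mona Lisa', 'tags': ['painting', 'louvre']}

stored = dict(row)
for key in serialized_fields:
    stored[key] = json.dumps(stored[key])    # what prep_data_for_insertion does

loaded = dict(stored)
for key in serialized_fields:
    loaded[key] = json.loads(loaded[key])    # what re_objectify_data undoes

assert loaded['tags'] == row['tags']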
Example #52
0
import urlparse
from datetime import datetime, timedelta
from werkzeug.useragents import UserAgent
from pygeoip import GeoIP, MMAP_CACHE
import threading
import re
from .. import config
from sqlalchemy.ext.sqlsoup import SqlSoup
from pystil.db import desc
from pystil.data.utils import parse_referrer, parse_domain

db = SqlSoup(config.CONFIG["DB_URL"])
Visit = db.visit

ipv4re = re.compile(r"(\d{1,3}(\.|$)){4}")

gip_tl = threading.local()


class Message(object):
    def __init__(self, query, user_agent, remote_addr):
        self.query = query
        self.stamp = datetime.utcnow()
        self.user_agent = user_agent
        self.remote_addr = remote_addr

    @property
    def gip(self):
        if not hasattr(gip_tl, 'gip'):
            gip_tl.gip = GeoIP(config.CONFIG['IP_DB'], MMAP_CACHE)
        return gip_tl.gip
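The `gip` property above lazily creates at most one GeoIP reader per thread via threading.local(), so the memory-mapped database is opened only where it is actually used. A minimal standalone sketch of the same pattern, with a plain object standing in for GeoIP:

import threading

_tl = threading.local()

def get_resource():
    # created on first use, then reused for the lifetime of the thread
    if not hasattr(_tl, 'resource'):
        _tl.resource = object()   # stand-in for GeoIP(config.CONFIG['IP_DB'], MMAP_CACHE)
    return _tl.resource

assert get_resource() is get_resource()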
Example #53
0
if not (db_user and db_pass and db_socket and days):
    print 'Config error!'
    sys.exit(-1)

if not days.isdigit():
    print 'Config file error, days must be a number'
    sys.exit(-1)

date = int(time()) - (86400 * int(days))

mysql_uri = "mysql://%s:%s@localhost/%%s?unix_socket=%s" % \
                (db_user, db_pass, db_socket)

zimbra_uri = mysql_uri % ('zimbra')
zimbra_db = SqlSoup(zimbra_uri)

zimbra_user = pwd.getpwnam('zimbra')
zimbra_uid = zimbra_user.pw_uid
zimbra_gid = zimbra_user.pw_gid

volume = False


def get_volume(volume_id):
    global volume
    if not volume or volume.id != volume_id:
        volume = zimbra_db.volume.filter(
            zimbra_db.volume.id == volume_id).one()
    return volume
Example #54
0
File: upgrade.py Project: wookiesh/domogik
class Upgrade:
    def __init__(self, engine):
        self.__db = SqlSoup(engine)

    def process(self):
        """Main function that run the update process"""
        #_upgrade_system_info_structure()
        old_app_version = self.get_current_app_version()
        old_db_version = self.get_current_db_version()
        print("=== Upgrade process")
        print("\t> Current version (application : %s, database = %s)" 
              % (self.get_current_app_version(), self.get_current_db_version()))
        print("\t> New version (application : %s, database = %s)" 
              % (self.get_new_app_version(), self.get_new_db_version()))
        self._sanity_check_before_upgrade()
        # Database upgrade
        while upgrade_scripts.db_upgrade(self):
            pass
        if old_db_version == self.get_current_db_version():
            print("\tThe database was NOT upgraded: nothing to do!")
        
        # Application upgrade
        while upgrade_scripts.app_upgrade(self):
            pass
        if old_app_version == self.get_current_app_version():
            print("\tThe application was NOT upgraded: nothing to do!")

        print("=== Upgrade process terminated")
    
    def set_version(self, app_version, db_version):
        """Set the version of the application and the database"""
        self.update_app_version(app_version)
        self.update_db_version(db_version)
        self.commit()
    
    def commit(self):
        self.__db.commit()
        
    #####################
    # Utility functions #
    #####################

    def _sanity_check_before_upgrade(self):
        """Check that the upgrade process can be run"""
        
        # We use NormalizedVersion to be able to make comparisons
        new_db_version = self.get_new_db_version()
        new_app_version = self.get_new_app_version()
        current_db_version = self.get_current_db_version()
        current_app_version = self.get_current_app_version()

        if new_db_version > new_app_version:
            print("Internal error")
            print("The new database version number (%s) can't be superior to the application one (%s)"
                  % (new_db_version, new_app_version))
            self._abort_upgrade_process()
        
        if current_db_version > new_db_version:
            print("Something is wrong with your installation:")
            print("Your database version number (%s) is superior to the one you're trying to install (%s)" 
                  % (current_db_version, new_db_version))
            self._abort_upgrade_process()

        if current_app_version > new_app_version:
            print("Something is wrong with your installation:")
            print("Your application version number (%s) is superior to the one you're trying to install (%s)" 
                  % (current_app_version, new_app_version))
            self._abort_upgrade_process()

        if current_db_version > current_app_version:
            print("Something is wrong with your installation:")
            print("Your database version number (%s) is superior to the application one (%s)" 
                  % (current_db_version, current_app_version))
            self._abort_upgrade_process()

    def get_current_db_version(self):
        """Return the current version of the database"""
        db_version = self._sql_execute("SELECT db_version FROM core_system_info").fetchone()[0]
        if db_version is None or db_version == '':
            # Should only happen for the first upgrade using this script
            return NormalizedVersion('0.1.0')
        else:
            return NormalizedVersion(self._suggest_normalized_version(db_version))

    def get_new_db_version(self):
        """Return the version of the database we should upgrade to (normalized version)"""
        return NormalizedVersion(self._suggest_normalized_version(DB_VERSION))

    def update_db_version(self, db_version):
        """Update the version of the database"""
        if self._sql_execute("SELECT db_version FROM core_system_info").fetchone() is None:
            sql = "INSERT INTO core_system_info (db_version) VALUES('%s')" % db_version
        else:
            sql = "UPDATE core_system_info SET db_version='%s'" % db_version
        self._sql_execute(sql)

    def get_current_app_version(self):
        """Return the current version of the application"""
        try:
            
            app_version = self._sql_execute("SELECT app_version FROM core_system_info").fetchone()[0]
            # Should only happen if the 'app_version' column doesn't exist (first application upgrade using this script)
            if app_version is None or app_version == '':
                app_version = NormalizedVersion('0.1.0')
            return NormalizedVersion(self._suggest_normalized_version(app_version))
        except Exception:
            return NormalizedVersion('0.1.0')

    def get_new_app_version(self):
        """Return the version of the application we should upgrade to (normalized version)"""
        return NormalizedVersion(self._suggest_normalized_version(__version__))

    def update_app_version(self, app_version):
        """Update the version of the application"""
        if self._sql_execute("SELECT app_version FROM core_system_info").fetchone() is None:
            sql = "INSERT INTO core_system_info (app_version) VALUES('%s')" % app_version
        else:
            sql = "UPDATE core_system_info SET app_version='%s'" % app_version
        self._sql_execute(sql)

    def _suggest_normalized_version(self, version):
        n_version = suggest_normalized_version(version)
        if n_version is None:
            print("Error : invalid version number : %s" % version)
            print("See : http://wiki.domogik.org/Release_numbering")
            self._abort_install_process()
        else:
            return n_version

    def _sql_execute(self, sql_code):
        return self.__db.execute(sql_code)

    def _abort_upgrade_process(self, message=""):
        print("Upgrade process aborted : %s" % message)
        sys.exit(1)
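A hedged driver for the class above: the engine URL is an assumption, and upgrade_scripts must provide the db_upgrade()/app_upgrade() steps that process() calls.

from sqlalchemy import create_engine

engine = create_engine("mysql://domogik:secret@localhost/domogik")  # assumed URL
upgrade = Upgrade(engine)   # SqlSoup accepts an Engine as well as a URL string
upgrade.process()           # runs database upgrades, then application upgrades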
Example #55
0
    Q = dbgrafo.execute(
        'select artigo_id_1,esfera_1,artigo_1,lei_1,artigo_id_2,esfera_2, artigo_2, lei_2, peso from vw_gr_artigo_artigo where  peso >100'
    )
    if not nedges:
        res = Q.fetchall()
        nedges = len(res)
    else:
        res = Q.fetchmany(nedges)
    eds = [(i[0], i[4], i[8]) for i in res]
    G = nx.Graph()
    G.add_weighted_edges_from(eds)
    print "== Grafo Artigo_Artigo == "
    print "==> Order: ", G.order()
    print "==> # Edges: ", len(G.edges())
    print "==> # Cliques: ", nx.algorithms.clique.graph_number_of_cliques(G)
    print "==> Avg. Clustering: ", nx.average_clustering(G)


if __name__ == "__main__":
    import socket
    dbgrafo = SqlSoup("%s/SEN_Grafo" % MySQLServer)
    dbdec = SqlSoup("%s/STF_Analise_Decisao" % MySQLServer)
    #cf88_vs_outras(500)
    #dyn_graph(1000)
    G, elist = lei_vs_lei()
    artigo_artigo()

    #P.show()
    dyn_graph_lei(elist)
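For reference, add_weighted_edges_from() expects (u, v, weight) triples exactly like the `eds` list built above, and stores the weight under the 'weight' edge attribute. A tiny self-contained example:

import networkx as nx

G = nx.Graph()
G.add_weighted_edges_from([(1, 2, 150), (2, 3, 101)])

print(G.order())           # 3 nodes
print(len(G.edges()))      # 2 edges
print(G[1][2]['weight'])   # 150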
Example #56
0
class DictionaryLookup:
    
    #FIXME: REFACTOR, REFACTOR, friggin' REFACTOR all this mess!
    def __init__(self):
        self.db = SqlSoup(SQLITE + PATH_TO_RES + JMDICT)
        #self.joinTables()   #TODO: should move somewhere (may be pre-dump routine?)
        self.dictionary = {}
        self.loadJmdictFromDumpRegex()	#TODO: add check
        
    def joinTables(self):
        '''Join tables on init for better performance'''
        join_word = self.db.join(self.db.k_ele, self.db.r_ele, self.db.k_ele.fk==self.db.r_ele.fk, isouter=True)
        join_sense = self.db.join(join_word, self.db.sense, join_word.fk==self.db.sense.fk )
        join_sense_labels = self.db.with_labels(join_sense)
        
        self.join_gloss = self.db.join(join_sense_labels, self.db.gloss, join_sense_labels.sense_id==self.db.gloss.fk)
    
    def dumpJmdictToFileMulilang(self, languages):
        '''VERY time (and memory) consuming'''
                
        dictionary = {}
        everything = self.join_gloss.all()
        print len(everything)
        for item in everything:
#            if item.lang == 'eng':
#                if dictionary.has_key(item.r_ele_value):
#                    dictionary[item.r_ele_value].append({'word' : item.k_ele_value, 'sense' : item.value})
#                else:
#                    dictionary[item.r_ele_value] = [{'word' : item.k_ele_value, 'sense' : item.value}]    #what about r_ele_nokanji?
#            if item.lang == 'eng' or item.lang == 'rus':    #(*)
            if item.lang in languages:
                if dictionary.has_key(item.r_ele_value):
                    if dictionary[item.r_ele_value].has_key(item.k_ele_value):
                        if dictionary[item.r_ele_value][item.k_ele_value].has_key(item.lang):
                            dictionary[item.r_ele_value][item.k_ele_value][item.lang].append(item.value)
                        else:
                            dictionary[item.r_ele_value][item.k_ele_value][item.lang] = [item.value]
                    else:
                            dictionary[item.r_ele_value][item.k_ele_value] = { item.lang : [item.value] } 

    #                if item.lang == 'eng':
    #                    dictionary[item.r_ele_value]['sense']['eng'].append(item.value)
    #                if item.lang == 'rus':
    #                    dictionary[item.r_ele_value]['sense']['rus'].append(item.value)
                    
#                    dictionary[item.r_ele_value][item.lang].append(item.value)         #without (*) check - will copy everythig
#                    dictionary[item.r_ele_value]['word'] = item.k_ele_value

                else:
                    #dictionary[item.r_ele_value] = [{'word' : item.k_ele_value, item.lang : [item.value] }] 
                    dictionary[item.r_ele_value] = { item.k_ele_value : { item.lang : [item.value] } }

            #===================================================================
            # if item.lang == 'eng':
            #    if dictionary.has_key(item.r_ele_value):
            #        dictionary[item.r_ele_value]['word'] = item.k_ele_value
            #        dictionary[item.r_ele_value]['sense']['eng'].append(item.value)
            #    else:
            #        dictionary[item.r_ele_value] = [{ 'word' : item.k_ele_value, 'eng' : [item.value] }] 
            # elif item.lang == 'rus':
            #    if dictionary.has_key(item.r_ele_value):
            #        dictionary[item.r_ele_value]['word'] = item.k_ele_value
            #        dictionary[item.r_ele_value]['sense']['rus'].append(item.value)
            #    else:
            #        dictionary[item.r_ele_value] = [{'word' : item.k_ele_value, 'rus' : [item.value] }] 
            #===================================================================

            #NB:[{ 'word' : ..., 'sense' : { 'rus' : [], 'eng' : [] } ]
            #or
            #NB: [{ 'word' : ..., 'rus' : [], 'eng' : [] } <--- the best?
            #or (even better)
            #additionally, check if 'word' is the same ~ append to rus/eng/... senses, not to item by key;
            #then, not dict[key] = [{ ... }] but dict[key] = { ... [], [] }
            #(probably won't work, reading : word is not 1:1)
                    
#            if item.lang == 'eng':
#                if dictionary.has_key(item.r_ele_value):
#                    dictionary[item.r_ele_value]['word'] = item.k_ele_value
#                    dictionary[item.r_ele_value]['sense']['eng'] = item.value
#                else:
#                    dictionary[item.r_ele_value] = [{'word' : item.k_ele_value, 'sense' : {'eng' : item.value}}] 
#            elif item.lang == 'rus':
#                if dictionary.has_key(item.r_ele_value):
#                    dictionary[item.r_ele_value]['word'] = item.k_ele_value
#                    dictionary[item.r_ele_value]['sense']['rus'] = item.value
#                else:
#                    dictionary[item.r_ele_value] = [{'word' : item.k_ele_value, 'sense' : {'rus' : item.value}}] 
            
        dump = open(PATH_TO_RES + JMDICT_DUMP, 'w')
        pickle.dump(dictionary, dump)
        
        del dictionary
        del everything
        dump.close()
    
    def dumpJmdictToFile(self, lang='eng'):
        
        dictionary = {}
        everything = self.join_gloss.all()
        print len(everything)
        for item in everything:
            if item.lang == lang:
                #NB:[ { 'word' : .., 'sense' : [..] } ] ~ dictionary composed of list of dictionaries
                #===============================================================
                # if dictionary.has_key(item.r_ele_value):
                #    dictionary[item.r_ele_value].append({'word' : item.k_ele_value, 'sense' : item.value})
                # else:
                #    dictionary[item.r_ele_value] = [{'word' : item.k_ele_value, 'sense' : item.value}]
                #===============================================================
                
                #NB: { word : [ senses ] } ~ dictionary of dictionaries
                if dictionary.has_key(item.r_ele_value):
                    if dictionary[item.r_ele_value].has_key(item.k_ele_value):
                        dictionary[item.r_ele_value][item.k_ele_value].append(item.value)
                    else:
                        dictionary[item.r_ele_value][item.k_ele_value] = [item.value]
                else:
                    dictionary[item.r_ele_value] = { item.k_ele_value : [item.value]  }
                    
        dump = open(PATH_TO_RES + JMDICT_DUMP, 'w')
        pickle.dump(dictionary, dump)
        
        del dictionary
        del everything
        dump.close()
        
    def dumpJmdictToFileWithRegex(self, lang='eng'):
        '''VERY time consuming'''
                
        dictionary = redict({})     #NB: regex dictionary, [] is an generator object (iterator)
        everything = self.join_gloss.all()
        print len(everything)
        for item in everything:
            #NB:[ { 'word' : .., 'sense' : [..] } ] ~ dictionary composed of list of dictionaries
            #===================================================================
            #    if dictionary.has_key(item.r_ele_value):
            #        dictionary.get(item.r_ele_value).append({'word' : item.k_ele_value, 'sense' : item.value})
            #    else:
            #        dictionary[item.r_ele_value] = [{'word' : item.k_ele_value, 'sense' : item.value}]
            #===================================================================
            
            #NB: { word : [ senses ] } ~ dictionary of dictionaries
            if item.lang == lang:
                if dictionary.has_key(item.r_ele_value):
                    if dictionary.get(item.r_ele_value).has_key(item.k_ele_value):
                        dictionary.get(item.r_ele_value).get(item.k_ele_value).append(item.value)
                    else:
                        dictionary.get(item.r_ele_value)[item.k_ele_value] = [item.value]
                else:
                    dictionary[item.r_ele_value] = { item.k_ele_value : [item.value], 'kana' : item.r_ele_value }     
            
        dump = open(PATH_TO_RES + JMDICT_DUMP + '_rx', 'w')
        pickle.dump(dictionary, dump)
        
        del dictionary
        del everything
        dump.close()
        
    def loadJmdictFromDump(self):
        dump = open(PATH_TO_RES + JMDICT_DUMP, 'r')
        self.dictionary = pickle.load(dump)
        dump.close()
           
    def loadJmdictFromDumpRegex(self):
        dump = open(PATH_TO_RES + JMDICT_DUMP + '_rx', 'r')
        self.dictionaryR = pickle.load(dump)
        dump.close()
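The dump/load helpers above are plain pickle round-trips of a nested dict keyed by reading. A minimal standalone sketch; the file name and dictionary entries are made up:

import pickle

dictionary = {u'ねこ': {u'猫': {'eng': ['cat']}}}

with open('jmdict_dump_example', 'wb') as dump:
    pickle.dump(dictionary, dump)

with open('jmdict_dump_example', 'rb') as dump:
    assert pickle.load(dump) == dictionary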
        
Example #57
0
def populate():

    tables = [('fermentables', 'Fermentable'), ('hops', 'Hop'),
              ('yeast', 'Yeast'), ('extras', 'Extra')]

    path = os.path.dirname(os.path.abspath(data.__file__))
    handle = SqlSoup('sqlite:///%s' % os.path.join(path, 'ingredients.db'))

    for table, cls in tables:
        print "-" * 80
        print DARKBLUE + table.upper() + ENDS
        print "-" * 80
        for ingredient in getattr(handle, table).all():

            # Coerce the data mapping into a dictionary
            kwargs = dict(
                (k, getattr(ingredient, k, '')) for k in ingredient.c.keys())

            # Attempt to look up the entity first
            uid = kwargs['uid']
            ingredient = getattr(entities, cls).get_by(uid=uid)

            # If the entity doesn't already exist, create it
            new = False
            changed = False

            if ingredient is None:
                new = True
                ingredient = getattr(entities, cls)()

            # Update necessary columns
            for k, v in kwargs.items():
                if not ne(getattr(ingredient, k), v):
                    changed = True

                if k == 'liquid':
                    v = True if v == 1 else False

                setattr(ingredient, k, v)

            if new is True:
                print "%s (%s)" % (ingredient.printed_name.encode(
                    'utf-8', 'ignore'), '%s New %s' % (GREEN, ENDS))
            elif changed is True:
                print "%s (%s)" % (ingredient.printed_name.encode(
                    'utf-8', 'ignore'), '%s Updated %s' % (YELLOW, ENDS))
            else:
                print "%s (No Changes)" % ingredient.printed_name.encode(
                    'utf-8', 'ignore')

    print "=" * 80
    print BLUE + "GENERATING STYLES" + ENDS
    print "=" * 80

    handle = SqlSoup('sqlite:///%s' % os.path.join(path, 'styles.db'))
    for style in handle.styles.all():
        # Coerce the data mapping into a dictionary
        kwargs = dict((k, getattr(style, k, '')) for k in style.c.keys())

        # Attempt to look up the entity first
        uid = kwargs['uid']
        style = entities.Style.get_by(uid=uid)

        # If the entity doesn't already exist, create it
        new = False
        changed = False

        if style is None:
            new = True
            style = entities.Style()

        # Update necessary columns
        for k, v in kwargs.items():
            if not ne(getattr(style, k), v):
                changed = True
            setattr(style, k, v)

        if new is True:
            print "%s (%s)" % (style.name.encode(
                'utf-8', 'ignore'), '%s New %s' % (GREEN, ENDS))
        elif changed is True:
            print "%s (%s)" % (style.name.encode(
                'utf-8', 'ignore'), '%s Updated %s' % (YELLOW, ENDS))
        else:
            print "%s (No Changes)" % style.name.encode('utf-8', 'ignore')

    model.commit()
Example #58
0
 def __init__(self):
     self.db = SqlSoup(SQLITE + PATH_TO_RES + JMDICT)
     #self.joinTables()   #TODO: should move somewhere (may be pre-dump routine?)
     self.dictionary = {}
     self.loadJmdictFromDumpRegex()	#TODO: add check