Beispiel #1
0
def getSupression():
    global db_url
    global supressions
    global ports
    global ip_ports
#This is to get all guarantees from the sql in the dwarf-server
    options = {"sql_connection": db_url}
    db = SqlSoup(options["sql_connection"])
    sup_info = db.supression.all()
    c_time = int(time.time())
    db.commit()
    for sup in sup_info:
        src_ip = sup.src_ip
        port_name = ip_ports[src_ip]["port_name"]
        print "getting db"
        print port_name
        for pid in ports:
            if port_name == ports[pid].port_name:
                print "this supression is mine"
                supress = sup.supression
                o_time = sup.time
                if port_name in supressions:
                    if c_time < (o_time + 10):
                        print "wow, a new one!" 
                        supressions[port_name]=supress
                    else:
                        del supressions[port_name]
Beispiel #2
0
def main():
    db_name = sys.argv[1]
    db = SqlSoup(db_name)
    while True:
        try:
            cmd = raw_input(prompt)
            if not cmd:
                continue
            if cmd.lower() in ("help", "?"):
                show_help()
                continue
            sql = get_sql(cmd, db.engine.name) or cmd
            res = db.execute(sql)
            print_result(res)
            db.commit()

        except (SystemExit, KeyboardInterrupt, EOFError):
            print "\nBye!\n"
            return 0

        except Exception:
            traceback.print_exc()
            db.rollback()
Beispiel #3
0
def main():
    """Suppression server: accept JSON updates from agents over TCP and
    persist them into the ``supression`` table.

    Reads globals ``db_url``, ``server_ip`` and ``server_port`` -- presumably
    initialised by PreConfig(); TODO confirm.
    """
    global agent_port
    global db_url
    PreConfig()
    #read db for ip and supression
    options = {"sql_connection": db_url}
    db = SqlSoup(options["sql_connection"])
    ips = db.ip_port.all()
    supressions = db.supression.all()
    db.commit()
    sup = {}
#{src_ip:{"supress":sup, "timeleft":time}
    # Seed the in-memory index with every src_ip already in the table.
    # NOTE(review): the value "20" looks like a placeholder -- only key
    # membership is tested below.
    for supress in supressions:
        sup[supress.src_ip] = "20"
#setup sockets
    sock=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind((server_ip, int(server_port)))
    sock.listen(100)
    while True:
        print time.clock()
        connection,address = sock.accept()
        try:
            # Cap the wait for the agent's payload at 5 seconds.
            connection.settimeout(5)
            buf = connection.recv(1024)
            flow2 = json.loads(buf)
            #flow2 = {u'src_ip': u'10.10.0.5', u'supression': 0}
            print "Server Received: %s " %(flow2)
            try:
                src_ip = flow2['src_ip']
                supression = flow2['supression']
                if src_ip in sup:
                    # Known source: update its existing row in place.
                    print "in sup, modify: " + src_ip
                    db.supression.get(src_ip).supression = supression
                    db.supression.get(src_ip).time = int(time.time())
                    db.commit()
                else:
                    # First report from this source: insert and remember it.
                    print "not in, adding: " + src_ip
                    print supression
                    db.supression.insert(src_ip=src_ip,supression=supression, time=int(time.time()))
                    sup[src_ip]=supression
                    db.commit()
            except Exception:
                # NOTE(review): swallows every DB/JSON-shape error with a
                # printed message only -- consider logging the traceback.
                print "f**k,sql wrong?"
        except socket.timeout:
            print 'time out'
        except:
            # NOTE(review): bare except hides bugs; narrow this if possible.
            print 'well, something wrong'
        connection.close()
Beispiel #4
0
        try:
            archive_dir_path = os.path.dirname(msg_archive_path)
            if not os.path.exists(archive_dir_path):
                mk_dir(archive_dir_path)

            if archive_vol.compress_blobs == 1 and \
                    mail_item.size > archive_vol.compression_threshold:
                copy_compress(msg_path, msg_archive_path)
                print 'zipped',
            else:
                shutil.move(msg_path, msg_archive_path)

            set_perm(msg_archive_path)

            mail_item.volume_id = archive_vol.id
            mboxgroup_db.commit()

        except IOError, e:
            print 'ERROR:', e
            continue

        print 'OK'
        archived_msgs += 1

    current_time = datetime.now()
    diff_time = current_time - start_time
    if diff_time.seconds > xtime:
        print "Stopping archiving, time excedded."
        break

    i += 1
Beispiel #5
0
    def daemon_loop(self, db_connection_url):
        """Main processing loop (not currently used).

        Repeatedly reads port and VLAN bindings from the database, compares
        them with the VIF ports present on the OVS integration bridge, and
        binds/unbinds ports whose bindings have changed.

        :param db_connection_url: database connection URL - kept so the loop
            can reconnect in the event the connection is lost
        """
        old_local_bindings = {}
        old_vif_ports = {}
        self.db_connected = False

        while True:
            if not self.db_connected:
                # Back off before (re)connecting so a dead DB isn't hammered.
                time.sleep(self.reconnect_interval)
                db = SqlSoup(db_connection_url)
                self.db_connected = True
                LOG.info('Connecting to database "%s" on %s' %
                         (db.engine.url.database, db.engine.url.host))

            # Get bindings from db.  The helpers clear self.db_connected on
            # failure; in that case restart the loop to reconnect.
            all_bindings = self.get_db_port_bindings(db)
            if not self.db_connected:
                continue
            all_bindings_vif_port_ids = set(all_bindings.keys())
            lsw_id_bindings = self.get_db_vlan_bindings(db)
            if not self.db_connected:
                continue

            # Get bindings from OVS bridge.
            vif_ports = self.int_br.get_vif_ports()
            new_vif_ports = dict([(p.vif_id, p) for p in vif_ports])
            new_vif_ports_ids = set(new_vif_ports.keys())

            old_vif_ports_ids = set(old_vif_ports.keys())
            # Ports on the bridge that have no binding in the database.
            dead_vif_ports_ids = new_vif_ports_ids - all_bindings_vif_port_ids
            dead_vif_ports = [new_vif_ports[p] for p in dead_vif_ports_ids]
            # Ports seen last iteration that are gone from the bridge now.
            disappeared_vif_ports_ids = old_vif_ports_ids - new_vif_ports_ids
            new_local_bindings_ids = all_bindings_vif_port_ids.intersection(new_vif_ports_ids)
            new_local_bindings = dict([(p, all_bindings.get(p))
                                       for p in new_vif_ports_ids])
            new_bindings = set((p, old_local_bindings.get(p),
                                new_local_bindings.get(p))
                               for p in new_vif_ports_ids)
            changed_bindings = set([b for b in new_bindings if b[2] != b[1]])

            LOG.debug("all_bindings: %s" % all_bindings)
            LOG.debug("lsw_id_bindings: %s" % lsw_id_bindings)
            LOG.debug("old_vif_ports_ids: %s" % old_vif_ports_ids)
            LOG.debug("dead_vif_ports_ids: %s" % dead_vif_ports_ids)
            # Bug fix: this line used to log old_vif_ports_ids a second time.
            LOG.debug("new_vif_ports_ids: %s" % new_vif_ports_ids)
            LOG.debug("new_local_bindings_ids: %s" % new_local_bindings_ids)
            LOG.debug("new_local_bindings: %s" % new_local_bindings)
            LOG.debug("new_bindings: %s" % new_bindings)
            LOG.debug("changed_bindings: %s" % changed_bindings)

            # Take action.
            for p in dead_vif_ports:
                # Bug fix: message previously ran the words together.
                LOG.info("No quantum binding for port " + str(p) +
                         ", putting on dead vlan")
                self.port_dead(p)

            for b in changed_bindings:
                port_id, old_port, new_port = b
                p = new_vif_ports[port_id]
                if old_port:
                    old_net_uuid = old_port.network_id
                    LOG.info("Removing binding to net-id = " + old_net_uuid +
                             " for " + str(p) + " added to dead vlan")
                    self.port_unbound(p, old_net_uuid)
                    if not new_port:
                        self.port_dead(p)

                if new_port:
                    new_net_uuid = new_port.network_id
                    if new_net_uuid not in lsw_id_bindings:
                        LOG.warn("No ls-id binding found for net-id '%s'" %
                                 new_net_uuid)
                        continue

                    lsw_id = lsw_id_bindings[new_net_uuid]
                    try:
                        self.port_bound(p, new_net_uuid, lsw_id)
                        LOG.info("Port " + str(p) + " on net-id = " +
                                 new_net_uuid + " bound to " +
                                 str(self.local_vlan_map[new_net_uuid]))
                    except Exception as e:
                        # Bug fix: the caught exception was silently dropped;
                        # include it in the log message.
                        LOG.info("Unable to bind Port " + str(p) +
                                 " on netid = " + new_net_uuid + " to " +
                                 str(self.local_vlan_map[new_net_uuid]) +
                                 " Exception: %s" % e)

            for vif_id in disappeared_vif_ports_ids:
                LOG.info("Port Disappeared: " + vif_id)
                old_port = old_local_bindings.get(vif_id)
                if old_port:
                    try:
                        self.port_unbound(old_vif_ports[vif_id],
                                          old_port.network_id)
                    except Exception:
                        # Bug fix: previously referenced the stale loop
                        # variable `p` (from the binding loop above) and a
                        # `network_uuid` attribute; the rest of this method
                        # uses `network_id`.
                        LOG.info("Unable to unbind Port " +
                                 str(old_vif_ports[vif_id]) +
                                 " on net-id = " + old_port.network_id)

            old_vif_ports = new_vif_ports
            old_local_bindings = new_local_bindings
            try:
                db.commit()
            except Exception as e:
                LOG.info("Unable to commit to database! Exception: %s" % e)
                db.rollback()
                # Forget cached state so the next pass re-evaluates all ports.
                old_local_bindings = {}
                old_vif_ports = {}

            time.sleep(self.polling_interval)
Beispiel #6
0
    def daemon_loop(self, db_connection_url):
        """Poll the DB for port/VLAN bindings and reconcile them with the
        VIF ports currently present on the OVS integration bridge.

        :param db_connection_url: database URL, used to (re)connect after a
            lost connection
        """
        self.local_vlan_map = {}
        old_local_bindings = {}
        old_vif_ports = {}
        db_connected = False

        while True:
            if not db_connected:
                # Back off before (re)connecting so a dead DB isn't hammered.
                time.sleep(self.reconnect_interval)
                db = SqlSoup(db_connection_url)
                db_connected = True
                LOG.info('Connecting to database "%s" on %s' % (db.engine.url.database, db.engine.url.host))

            all_bindings = {}
            try:
                ports = db.ports.all()
            except Exception as e:
                LOG.info("Unable to get port bindings! Exception: %s" % e)
                # Drop the flag so the next pass reconnects.
                db_connected = False
                continue

            # Index DB port rows by interface id for O(1) lookups below.
            for port in ports:
                all_bindings[port.interface_id] = port

            vlan_bindings = {}
            try:
                vlan_binds = db.vlan_bindings.all()
            except Exception as e:
                LOG.info("Unable to get vlan bindings! Exception: %s" % e)
                db_connected = False
                continue

            # Map network id -> vlan id.
            for bind in vlan_binds:
                vlan_bindings[bind.network_id] = bind.vlan_id

            new_vif_ports = {}
            new_local_bindings = {}
            vif_ports = self.int_br.get_vif_ports()
            for p in vif_ports:
                new_vif_ports[p.vif_id] = p
                if p.vif_id in all_bindings:
                    net_id = all_bindings[p.vif_id].network_id
                    new_local_bindings[p.vif_id] = net_id
                else:
                    # no binding, put him on the 'dead vlan' and drop traffic
                    self.int_br.set_db_attribute("Port", p.port_name, "tag", DEAD_VLAN_TAG)
                    self.int_br.add_flow(priority=2, match="in_port=%s" % p.ofport, actions="drop")

                old_b = old_local_bindings.get(p.vif_id, None)
                new_b = new_local_bindings.get(p.vif_id, None)

                if old_b != new_b:
                    if old_b is not None:
                        # Binding changed or vanished: detach from old net.
                        LOG.info("Removing binding to net-id = %s for %s" % (old_b, str(p)))
                        self.port_unbound(p, True)
                        if p.vif_id in all_bindings:
                            all_bindings[p.vif_id].op_status = OP_STATUS_DOWN
                    if new_b is not None:
                        # New binding: attach the port to its network's VLAN
                        # (falling back to the dead VLAN if the net has none).
                        net_id = all_bindings[p.vif_id].network_id
                        vlan_id = vlan_bindings.get(net_id, DEAD_VLAN_TAG)
                        self.port_bound(p, vlan_id)
                        if p.vif_id in all_bindings:
                            all_bindings[p.vif_id].op_status = OP_STATUS_UP
                        LOG.info(("Adding binding to net-id = %s " "for %s on vlan %s") % (new_b, str(p), vlan_id))

            # Ports seen last pass but no longer on the bridge.
            for vif_id in old_vif_ports:
                if vif_id not in new_vif_ports:
                    LOG.info("Port Disappeared: %s" % vif_id)
                    if vif_id in old_local_bindings:
                        old_b = old_local_bindings[vif_id]
                        self.port_unbound(old_vif_ports[vif_id], False)
                    if vif_id in all_bindings:
                        all_bindings[vif_id].op_status = OP_STATUS_DOWN

            old_vif_ports = new_vif_ports
            old_local_bindings = new_local_bindings
            try:
                db.commit()
            except Exception as e:
                LOG.info("Unable to commit to database! Exception: %s" % e)
                db.rollback()
                # Forget cached state so the next pass re-evaluates all ports.
                old_local_bindings = {}
                old_vif_ports = {}

            time.sleep(self.polling_interval)
Beispiel #7
0
class MySQLBind9Backend(base.Backend):
    """Designate backend that writes zone data into a MySQL table consumed
    by mysql-bind9, then triggers a bind reload via ``rndc reconfig``.
    """

    __plugin_name__ = 'mysqlbind9'

    def get_url_data(self):
        """Return the DNS database connection info as a dict of connect args."""
        url = _parse_rfc1738_args(cfg.CONF[self.name].database_connection)
        return url.translate_connect_args()

    def get_dns_table(self, table=None):
        """
        Get a Table object from SQLSoup

        :param table: Overridable table name
        """
        table = table or cfg.CONF[self.name].database_dns_table
        return getattr(self._db, table)

    def start(self):
        """Connect to the DNS database (when writing is enabled) and sync."""
        super(MySQLBind9Backend, self).start()

        if cfg.CONF[self.name].write_database:
            self._engine = get_engine(self.name)
            self._db = SqlSoup(self._engine)

        self._sync_domains()

    def _add_soa_record(self, domain, servers):
        """
        add the single SOA record for this domain. Must create the
        data from attributes of the domain
        """
        table = self.get_dns_table()
        data_rec = "%s. %s. %d %d %d %d %d" % (
                   servers[0]['name'],
                   domain['email'].replace("@", "."),
                   domain['serial'],
                   domain['refresh'],
                   domain['retry'],
                   domain['expire'],
                   domain['minimum'])

        # use the domain id for records that don't have a match
        # in designate's records table
        table.insert(
            tenant_id=domain['tenant_id'],
            domain_id=domain['id'],
            designate_rec_id=domain['id'],
            name=domain['name'],
            ttl=domain['ttl'],
            type='SOA',
            data=data_rec)
        self._db.commit()

    def _add_ns_records(self, domain, servers):
        """
        add the NS records, one for each server, for this domain
        """
        table = self.get_dns_table()

        # use the domain id for records that don't have a match
        # in designate's records table
        for server in servers:
            table.insert(
                tenant_id=domain['tenant_id'],
                domain_id=domain['id'],
                designate_rec_id=domain['id'],
                name=domain['name'],
                ttl=domain['ttl'],
                type='NS',
                data=server['name'])

        self._db.commit()

    def _insert_db_record(self, tenant_id, domain_id, record):
        """
        generic db insertion method for a domain record
        """
        table = self.get_dns_table()
        table.insert(
            tenant_id=tenant_id,
            domain_id=domain_id,
            designate_rec_id=record['id'],
            name=record['name'],
            ttl=record['ttl'],
            type=record['type'],
            data=record['data'])
        self._db.commit()

    def _update_ns_records(self, domain, servers):
        """
        delete and re-add all NS records : easier to just delete all
        NS records and then replace - in the case of adding new NS
        servers
        """
        table = self.get_dns_table()

        all_ns_rec = table.filter_by(tenant_id=domain['tenant_id'],
                                     domain_id=domain['id'],
                                     type=u'NS')

        # delete all NS records
        all_ns_rec.delete()
        # add all NS records (might have new servers)
        self._db.commit()

        self._add_ns_records(domain, servers)

    def _update_db_record(self, tenant_id, record):
        """
        generic domain db record update method
        """
        table = self.get_dns_table()

        q = table.filter_by(
            tenant_id=tenant_id,
            domain_id=record['domain_id'],
            designate_rec_id=record['id'])

        q.update({'ttl': record['ttl'],
                  'type': record['type'],
                  'data': record['data']})

        self._db.commit()

    def _update_soa_record(self, domain, servers):
        """
        update the one single SOA record for the domain
        """
        LOG.debug("_update_soa_record()")
        table = self.get_dns_table()

        # there will only ever be -one- of these
        existing_record = table.filter_by(tenant_id=domain['tenant_id'],
                                          domain_id=domain['id'],
                                          type=u'SOA')

        data_rec = "%s. %s. %d %d %d %d %d" % (
                   servers[0]['name'],
                   domain['email'].replace("@", "."),
                   domain['serial'],
                   domain['refresh'],
                   domain['retry'],
                   domain['expire'],
                   domain['minimum'])

        existing_record.update(
            {'ttl': domain['ttl'],
             'type': u'SOA',
             'data': data_rec})

        self._db.commit()

#    def _update_domain_ttl(self, domain):
#        LOG.debug("_update_soa_record()")
#        table = self.get_dns_table()
#
#        # there will only ever be -one- of these
#        domain_records = table.filter_by(domain_id=domain['id'])
#
#        domain_records.update({'ttl': domain['ttl']})
#
#        self._db.commit()

    def _delete_db_record(self, tenant_id, record):
        """
        delete a specific record for a given domain
        """
        table = self.get_dns_table()
        LOG.debug("_delete_db_record")

        q = table.filter_by(
            tenant_id=tenant_id,
            domain_id=record['domain_id'],
            designate_rec_id=record['id'])

        q.delete()

        self._db.commit()

    def _delete_db_domain_records(self, tenant_id, domain_id):
        """
         delete all records for a given domain
         """
        LOG.debug('_delete_db_domain_records()')
        table = self.get_dns_table()

        # delete all records for the domain id
        q = table.filter_by(tenant_id=tenant_id,
                            domain_id=domain_id)
        q.delete()

        self._db.commit()

    def create_domain(self, context, domain):
        """Write SOA/NS records for a new domain and resync bind."""
        LOG.debug('create_domain()')

        if cfg.CONF[self.name].write_database:
            servers = self.central_service.get_servers(self.admin_context)

            self._add_soa_record(domain, servers)
            self._add_ns_records(domain, servers)

        self._sync_domains()

    def update_domain(self, context, domain):
        """Refresh SOA/NS records for an existing domain."""
        LOG.debug('update_domain()')

        if cfg.CONF[self.name].write_database:
            servers = self.central_service.get_servers(self.admin_context)

            self._update_soa_record(domain, servers)
            self._update_ns_records(domain, servers)

    def delete_domain(self, context, domain):
        """Remove all of a domain's records and resync bind."""
        LOG.debug('delete_domain()')

        if cfg.CONF[self.name].write_database:
            self._delete_db_domain_records(domain['tenant_id'],
                                           domain['id'])

        self._sync_domains()

    def create_record(self, context, domain, record):
        """Insert one DNS record for the domain."""
        LOG.debug('create_record()')
        if cfg.CONF[self.name].write_database:
            self._insert_db_record(domain['tenant_id'],
                                   domain['id'],
                                   record)

    def update_record(self, context, domain, record):
        """Update one DNS record for the domain."""
        LOG.debug('update_record()')
        if cfg.CONF[self.name].write_database:
            self._update_db_record(domain['tenant_id'],
                                   record)

    def delete_record(self, context, domain, record):
        """Delete one DNS record for the domain."""
        LOG.debug('Delete Record')
        if cfg.CONF[self.name].write_database:
            self._delete_db_record(domain['tenant_id'],
                                   record)

    def _sync_domains(self):
        """
        Update the zone file and reconfig rndc to update bind.
        Unike regular bind, this only needs to be done upon adding
        or deleting domains as mysqlbind takes care of updating
        bind upon regular record changes
        """
        LOG.debug('Synchronising domains')

        domains = self.central_service.get_domains(self.admin_context)

        output_folder = os.path.join(os.path.abspath(cfg.CONF.state_path),
                                     'bind9')

        # Create the output folder tree if necessary
        if not os.path.exists(output_folder):
            os.makedirs(output_folder)

        output_path = os.path.join(output_folder, 'zones.config')

        abs_state_path = os.path.abspath(cfg.CONF.state_path)

        LOG.debug("Getting ready to write zones.config at %s" % output_path)

        # NOTE(CapTofu): Might have to adapt this later on?
        url = self.get_url_data()
        utils.render_template_to_file('mysql-bind9-config.jinja2',
                                      output_path,
                                      domains=domains,
                                      state_path=abs_state_path,
                                      dns_server_type=cfg.CONF[self.name].
                                      dns_server_type,
                                      dns_db_schema=url['database'],
                                      dns_db_table=cfg.CONF[self.name].
                                      database_dns_table,
                                      dns_db_host=url['host'],
                                      dns_db_user=url['username'],
                                      dns_db_password=url['password'])

        # only do this if domain create, domain delete
        rndc_call = [
            'rndc',
            '-s', cfg.CONF[self.name].rndc_host,
            '-p', str(cfg.CONF[self.name].rndc_port),
        ]

        # Bug fix: these two options were read from self.config, which is
        # never defined on this class; every other rndc option (including
        # the guard conditions) comes from cfg.CONF[self.name].
        if cfg.CONF[self.name].rndc_config_file:
            rndc_call.extend(['-c', cfg.CONF[self.name].rndc_config_file])

        if cfg.CONF[self.name].rndc_key_file:
            rndc_call.extend(['-k', cfg.CONF[self.name].rndc_key_file])

        rndc_call.extend(['reconfig'])

        utils.execute(*rndc_call)
Beispiel #8
0
class MySQLBind9Backend(base.Backend):
    """Designate backend that writes zone data into a MySQL table consumed
    by mysql-bind9, then triggers a bind reload via ``rndc reconfig``.
    """

    __plugin_name__ = 'mysqlbind9'

    def get_url_data(self):
        """Return the DNS database connection info as a dict of connect args."""
        url = _parse_rfc1738_args(cfg.CONF[self.name].database_connection)
        return url.translate_connect_args()

    def get_dns_table(self, table=None):
        """
        Get a Table object from SQLSoup

        :param table: Overridable table name
        """
        table = table or cfg.CONF[self.name].database_dns_table
        return getattr(self._db, table)

    def start(self):
        """Connect to the DNS database (when writing is enabled) and sync."""
        super(MySQLBind9Backend, self).start()

        if cfg.CONF[self.name].write_database:
            self._engine = get_engine(self.name)
            self._db = SqlSoup(self._engine)

        self._sync_domains()

    def _add_soa_record(self, domain, servers):
        """
        add the single SOA record for this domain. Must create the
        data from attributes of the domain
        """
        table = self.get_dns_table()
        data_rec = "%s. %s. %d %d %d %d %d" % (
            servers[0]['name'], domain['email'].replace(
                "@", "."), domain['serial'], domain['refresh'],
            domain['retry'], domain['expire'], domain['minimum'])

        # use the domain id for records that don't have a match
        # in designate's records table
        table.insert(tenant_id=domain['tenant_id'],
                     domain_id=domain['id'],
                     designate_rec_id=domain['id'],
                     name=domain['name'],
                     ttl=domain['ttl'],
                     type='SOA',
                     data=data_rec)
        self._db.commit()

    def _add_ns_records(self, domain, servers):
        """
        add the NS records, one for each server, for this domain
        """
        table = self.get_dns_table()

        # use the domain id for records that don't have a match
        # in designate's records table
        for server in servers:
            table.insert(tenant_id=domain['tenant_id'],
                         domain_id=domain['id'],
                         designate_rec_id=domain['id'],
                         name=domain['name'],
                         ttl=domain['ttl'],
                         type='NS',
                         data=server['name'])

        self._db.commit()

    def _insert_db_record(self, tenant_id, domain_id, record):
        """
        generic db insertion method for a domain record
        """
        table = self.get_dns_table()
        table.insert(tenant_id=tenant_id,
                     domain_id=domain_id,
                     designate_rec_id=record['id'],
                     name=record['name'],
                     ttl=record['ttl'],
                     type=record['type'],
                     data=record['data'])
        self._db.commit()

    def _update_ns_records(self, domain, servers):
        """
        delete and re-add all NS records : easier to just delete all
        NS records and then replace - in the case of adding new NS
        servers
        """
        table = self.get_dns_table()

        all_ns_rec = table.filter_by(tenant_id=domain['tenant_id'],
                                     domain_id=domain['id'],
                                     type=u'NS')

        # delete all NS records
        all_ns_rec.delete()
        # add all NS records (might have new servers)
        self._db.commit()

        self._add_ns_records(domain, servers)

    def _update_db_record(self, tenant_id, record):
        """
        generic domain db record update method
        """
        table = self.get_dns_table()

        q = table.filter_by(tenant_id=tenant_id,
                            domain_id=record['domain_id'],
                            designate_rec_id=record['id'])

        q.update({
            'ttl': record['ttl'],
            'type': record['type'],
            'data': record['data']
        })

        self._db.commit()

    def _update_soa_record(self, domain, servers):
        """
        update the one single SOA record for the domain
        """
        LOG.debug("_update_soa_record()")
        table = self.get_dns_table()

        # there will only ever be -one- of these
        existing_record = table.filter_by(tenant_id=domain['tenant_id'],
                                          domain_id=domain['id'],
                                          type=u'SOA')

        data_rec = "%s. %s. %d %d %d %d %d" % (
            servers[0]['name'], domain['email'].replace(
                "@", "."), domain['serial'], domain['refresh'],
            domain['retry'], domain['expire'], domain['minimum'])

        existing_record.update({
            'ttl': domain['ttl'],
            'type': u'SOA',
            'data': data_rec
        })

        self._db.commit()

#    def _update_domain_ttl(self, domain):
#        LOG.debug("_update_soa_record()")
#        table = self.get_dns_table()
#
#        # there will only ever be -one- of these
#        domain_records = table.filter_by(domain_id=domain['id'])
#
#        domain_records.update({'ttl': domain['ttl']})
#
#        self._db.commit()

    def _delete_db_record(self, tenant_id, record):
        """
        delete a specific record for a given domain
        """
        table = self.get_dns_table()
        LOG.debug("_delete_db_record")

        q = table.filter_by(tenant_id=tenant_id,
                            domain_id=record['domain_id'],
                            designate_rec_id=record['id'])

        q.delete()

        self._db.commit()

    def _delete_db_domain_records(self, tenant_id, domain_id):
        """
         delete all records for a given domain
         """
        LOG.debug('_delete_db_domain_records()')
        table = self.get_dns_table()

        # delete all records for the domain id
        q = table.filter_by(tenant_id=tenant_id, domain_id=domain_id)
        q.delete()

        self._db.commit()

    def create_domain(self, context, domain):
        """Write SOA/NS records for a new domain and resync bind."""
        LOG.debug('create_domain()')

        if cfg.CONF[self.name].write_database:
            servers = self.central_service.find_servers(self.admin_context)

            self._add_soa_record(domain, servers)
            self._add_ns_records(domain, servers)

        self._sync_domains()

    def update_domain(self, context, domain):
        """Refresh SOA/NS records for an existing domain."""
        LOG.debug('update_domain()')

        if cfg.CONF[self.name].write_database:
            servers = self.central_service.find_servers(self.admin_context)

            self._update_soa_record(domain, servers)
            self._update_ns_records(domain, servers)

    def delete_domain(self, context, domain):
        """Remove all of a domain's records and resync bind."""
        LOG.debug('delete_domain()')

        if cfg.CONF[self.name].write_database:
            self._delete_db_domain_records(domain['tenant_id'], domain['id'])

        self._sync_domains()

    def create_server(self, context, server):
        """Not implemented for this backend."""
        LOG.debug('create_server()')

        raise exceptions.NotImplemented('create_server() for '
                                        'mysqlbind9 backend is '
                                        'not implemented')
        """
        TODO: this first-cut will not scale. Use bulk SQLAlchemy (core) queries
        if cfg.CONF[self.name].write_database:
            domains = self.central_service.find_domains(self.admin_context)

            for domain in domains:
                self._add_ns_records(domain, server)

        self._sync_domains()
        """

#   This method could be a very expensive and should only be called
#   (e.g., from central) only if the name of the existing server is
#   changed.

    def update_server(self, context, server):
        """Not implemented for this backend."""
        LOG.debug('update_server()')

        raise exceptions.NotImplemented('update_server() for '
                                        'mysqlbind9 backend is '
                                        'not implemented')
        """
        TODO: this first-cut will not scale. Use bulk SQLAlchemy (core) queries
        if cfg.CONF[self.name].write_database:
            servers = self.central_service.find_servers(self.admin_context)
            domains = self.central_service.find_domains(self.admin_context)

            for domain in domains:
                self._update_ns_records(domain, servers)

        self._sync_domains()
        """

    def delete_server(self, context, server):
        """Not implemented for this backend."""
        LOG.debug('delete_server()')

        raise exceptions.NotImplemented('delete_server() for '
                                        'mysqlbind9 backend is'
                                        ' not implemented')
        """
        TODO: For scale, Use bulk SQLAlchemy (core) queries
        """

    def create_record(self, context, domain, record):
        """Insert one DNS record for the domain."""
        LOG.debug('create_record()')
        if cfg.CONF[self.name].write_database:
            self._insert_db_record(domain['tenant_id'], domain['id'], record)

    def update_record(self, context, domain, record):
        """Update one DNS record for the domain."""
        LOG.debug('update_record()')
        if cfg.CONF[self.name].write_database:
            self._update_db_record(domain['tenant_id'], record)

    def delete_record(self, context, domain, record):
        """Delete one DNS record for the domain."""
        LOG.debug('Delete Record')
        if cfg.CONF[self.name].write_database:
            self._delete_db_record(domain['tenant_id'], record)

    def _sync_domains(self):
        """
        Update the zone file and reconfig rndc to update bind.
        Unike regular bind, this only needs to be done upon adding
        or deleting domains as mysqlbind takes care of updating
        bind upon regular record changes
        """
        LOG.debug('Synchronising domains')

        domains = self.central_service.find_domains(self.admin_context)

        output_folder = os.path.join(os.path.abspath(cfg.CONF.state_path),
                                     'bind9')

        # Create the output folder tree if necessary
        if not os.path.exists(output_folder):
            os.makedirs(output_folder)

        output_path = os.path.join(output_folder, 'zones.config')

        abs_state_path = os.path.abspath(cfg.CONF.state_path)

        LOG.debug("Getting ready to write zones.config at %s" % output_path)

        # NOTE(CapTofu): Might have to adapt this later on?
        url = self.get_url_data()
        utils.render_template_to_file(
            'mysql-bind9-config.jinja2',
            output_path,
            domains=domains,
            state_path=abs_state_path,
            dns_server_type=cfg.CONF[self.name].dns_server_type,
            dns_db_schema=url['database'],
            dns_db_table=cfg.CONF[self.name].database_dns_table,
            dns_db_host=url['host'],
            dns_db_user=url['username'],
            dns_db_password=url['password'])

        # only do this if domain create, domain delete
        rndc_call = [
            'rndc',
            '-s',
            cfg.CONF[self.name].rndc_host,
            '-p',
            str(cfg.CONF[self.name].rndc_port),
        ]

        # Bug fix: these two options were read from self.config, which is
        # never defined on this class; every other rndc option (including
        # the guard conditions) comes from cfg.CONF[self.name].
        if cfg.CONF[self.name].rndc_config_file:
            rndc_call.extend(['-c', cfg.CONF[self.name].rndc_config_file])

        if cfg.CONF[self.name].rndc_key_file:
            rndc_call.extend(['-k', cfg.CONF[self.name].rndc_key_file])

        rndc_call.extend(['reconfig'])

        utils.execute(*rndc_call)
Beispiel #9
0
    # This is how agent could communicate with server
    flow1 = Flow_Info()
    flow1.set_host("mao", "la")

    flow2 = {"src_host":"mao", "dst_host":"la"}

    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  
    sock.connect((server_ip, int(server_port)))  
    sock.send(json.dumps(flow2))  
    print sock.recv(1024)  
    print flow2
    print json.loads(json.dumps(flow2))
    sock.close()  

    options = {"sql_connection": db_url}
    db = SqlSoup(options["sql_connection"])
    #LOG.info("Connecting to database \"%s\" on %s" %

    port_g = db.port_guarantee.all()
    db.commit()

    guarantees = {}
    for port in port_g:
	    guarantees[port.port_name]={'0':port.guarantee}

    print guarantees
            

# Script entry point: run main() only when executed directly, not on import.
if __name__ == '__main__':
    main()
Beispiel #10
0
class MySQLBind9Backend(base.Backend):
    """Designate backend that drives BIND9 from a MySQL DNS database.

    Records are written into a SQL table that BIND reads directly, so
    ordinary record changes need no daemon interaction; only domain
    creation/deletion requires regenerating zones.config and running
    ``rndc reconfig`` (see :meth:`_sync_domains`).
    """

    __plugin_name__ = "mysqlbind9"

    def get_url_data(self):
        """Parse the configured ``database_connection`` URL.

        :returns: dict of connect args ('database', 'host', 'username',
            'password', ...) as produced by ``translate_connect_args()``.
        """
        url = _parse_rfc1738_args(cfg.CONF[self.name].database_connection)
        return url.translate_connect_args()

    def get_dns_table(self, table=None):
        """
        Get a Table object from SQLSoup

        :param table: Overridable table name
        """
        table = table or cfg.CONF[self.name].database_dns_table
        return getattr(self._db, table)

    def start(self):
        """Open the DNS database (when write_database is enabled) and push
        the current set of domains out to BIND."""
        super(MySQLBind9Backend, self).start()

        if cfg.CONF[self.name].write_database:
            self._engine = get_engine(self.name)
            self._db = SqlSoup(self._engine)

        self._sync_domains()

    def _add_soa_record(self, domain, servers):
        """
        add the single SOA record for this domain. Must create the
        data from attributes of the domain
        """
        table = self.get_dns_table()
        # SOA data: "<primary-ns>. <email-as-dotted>. serial refresh retry
        # expire minimum"
        data_rec = "%s. %s. %d %d %d %d %d" % (
            servers[0]["name"],
            domain["email"].replace("@", "."),
            domain["serial"],
            domain["refresh"],
            domain["retry"],
            domain["expire"],
            domain["minimum"],
        )

        # use the domain id for records that don't have a match
        # in designate's records table
        table.insert(
            tenant_id=domain["tenant_id"],
            domain_id=domain["id"],
            designate_rec_id=domain["id"],
            name=domain["name"],
            ttl=domain["ttl"],
            type="SOA",
            data=data_rec,
        )
        self._db.commit()

    def _add_ns_records(self, domain, servers):
        """
        add the NS records, one for each server, for this domain
        """
        table = self.get_dns_table()

        # use the domain id for records that don't have a match
        # in designate's records table
        for server in servers:
            table.insert(
                tenant_id=domain["tenant_id"],
                domain_id=domain["id"],
                designate_rec_id=domain["id"],
                name=domain["name"],
                ttl=domain["ttl"],
                type="NS",
                data=server["name"],
            )

        self._db.commit()

    def _insert_db_record(self, tenant_id, domain_id, record):
        """
        generic db insertion method for a domain record
        """
        table = self.get_dns_table()
        table.insert(
            tenant_id=tenant_id,
            domain_id=domain_id,
            designate_rec_id=record["id"],
            name=record["name"],
            ttl=record["ttl"],
            type=record["type"],
            data=record["data"],
        )
        self._db.commit()

    def _update_ns_records(self, domain, servers):
        """
        delete and re-add all NS records : easier to just delete all
        NS records and then replace - in the case of adding new NS
        servers
        """
        table = self.get_dns_table()

        all_ns_rec = table.filter_by(tenant_id=domain["tenant_id"], domain_id=domain["id"], type=u"NS")

        # delete all NS records
        all_ns_rec.delete()
        # add all NS records (might have new servers)
        self._db.commit()

        self._add_ns_records(domain, servers)

    def _update_db_record(self, tenant_id, record):
        """
        generic domain db record update method
        """
        table = self.get_dns_table()

        # rows are matched back to designate's record via designate_rec_id
        q = table.filter_by(tenant_id=tenant_id, domain_id=record["domain_id"], designate_rec_id=record["id"])

        q.update({"ttl": record["ttl"], "type": record["type"], "data": record["data"]})

        self._db.commit()

    def _update_soa_record(self, domain, servers):
        """
        update the one single SOA record for the domain
        """
        LOG.debug("_update_soa_record()")
        table = self.get_dns_table()

        # there will only ever be -one- of these
        existing_record = table.filter_by(tenant_id=domain["tenant_id"], domain_id=domain["id"], type=u"SOA")

        data_rec = "%s. %s. %d %d %d %d %d" % (
            servers[0]["name"],
            domain["email"].replace("@", "."),
            domain["serial"],
            domain["refresh"],
            domain["retry"],
            domain["expire"],
            domain["minimum"],
        )

        existing_record.update({"ttl": domain["ttl"], "type": u"SOA", "data": data_rec})

        self._db.commit()

    #    def _update_domain_ttl(self, domain):
    #        LOG.debug("_update_soa_record()")
    #        table = self.get_dns_table()
    #
    #        # there will only ever be -one- of these
    #        domain_records = table.filter_by(domain_id=domain['id'])
    #
    #        domain_records.update({'ttl': domain['ttl']})
    #
    #        self._db.commit()

    def _delete_db_record(self, tenant_id, record):
        """
        delete a specific record for a given domain
        """
        table = self.get_dns_table()
        LOG.debug("_delete_db_record")

        q = table.filter_by(tenant_id=tenant_id, domain_id=record["domain_id"], designate_rec_id=record["id"])

        q.delete()

        self._db.commit()

    def _delete_db_domain_records(self, tenant_id, domain_id):
        """
         delete all records for a given domain
         """
        LOG.debug("_delete_db_domain_records()")
        table = self.get_dns_table()

        # delete all records for the domain id
        q = table.filter_by(tenant_id=tenant_id, domain_id=domain_id)
        q.delete()

        self._db.commit()

    def create_domain(self, context, domain):
        """Write SOA/NS rows for a new domain and reconfigure BIND."""
        LOG.debug("create_domain()")

        if cfg.CONF[self.name].write_database:
            servers = self.central_service.find_servers(self.admin_context)

            self._add_soa_record(domain, servers)
            self._add_ns_records(domain, servers)

        self._sync_domains()

    def update_domain(self, context, domain):
        """Refresh SOA/NS rows; no rndc call needed for updates."""
        LOG.debug("update_domain()")

        if cfg.CONF[self.name].write_database:
            servers = self.central_service.find_servers(self.admin_context)

            self._update_soa_record(domain, servers)
            self._update_ns_records(domain, servers)

    def delete_domain(self, context, domain):
        """Drop all of the domain's rows and reconfigure BIND."""
        LOG.debug("delete_domain()")

        if cfg.CONF[self.name].write_database:
            self._delete_db_domain_records(domain["tenant_id"], domain["id"])

        self._sync_domains()

    def create_server(self, context, server):
        LOG.debug("create_server()")

        raise exceptions.NotImplemented("create_server() for " "mysqlbind9 backend is " "not implemented")

        """
        TODO: this first-cut will not scale. Use bulk SQLAlchemy (core) queries
        if cfg.CONF[self.name].write_database:
            domains = self.central_service.find_domains(self.admin_context)

            for domain in domains:
                self._add_ns_records(domain, server)

        self._sync_domains()
        """

    #   This method could be a very expensive and should only be called
    #   (e.g., from central) only if the name of the existing server is
    #   changed.
    def update_server(self, context, server):
        LOG.debug("update_server()")

        raise exceptions.NotImplemented("update_server() for " "mysqlbind9 backend is " "not implemented")

        """
        TODO: this first-cut will not scale. Use bulk SQLAlchemy (core) queries
        if cfg.CONF[self.name].write_database:
            servers = self.central_service.find_servers(self.admin_context)
            domains = self.central_service.find_domains(self.admin_context)

            for domain in domains:
                self._update_ns_records(domain, servers)

        self._sync_domains()
        """

    def delete_server(self, context, server):
        LOG.debug("delete_server()")

        raise exceptions.NotImplemented("delete_server() for " "mysqlbind9 backend is" " not implemented")

        """
        TODO: For scale, Use bulk SQLAlchemy (core) queries
        """

    def create_record(self, context, domain, record):
        """Insert one record row; BIND serves it directly from the DB."""
        LOG.debug("create_record()")
        if cfg.CONF[self.name].write_database:
            self._insert_db_record(domain["tenant_id"], domain["id"], record)

    def update_record(self, context, domain, record):
        """Update one record row in place."""
        LOG.debug("update_record()")
        if cfg.CONF[self.name].write_database:
            self._update_db_record(domain["tenant_id"], record)

    def delete_record(self, context, domain, record):
        """Delete one record row."""
        LOG.debug("Delete Record")
        if cfg.CONF[self.name].write_database:
            self._delete_db_record(domain["tenant_id"], record)

    def _sync_domains(self):
        """
        Update the zone file and reconfig rndc to update bind.
        Unike regular bind, this only needs to be done upon adding
        or deleting domains as mysqlbind takes care of updating
        bind upon regular record changes
        """
        LOG.debug("Synchronising domains")

        domains = self.central_service.find_domains(self.admin_context)

        output_folder = os.path.join(os.path.abspath(cfg.CONF.state_path), "bind9")

        # Create the output folder tree if necessary
        if not os.path.exists(output_folder):
            os.makedirs(output_folder)

        output_path = os.path.join(output_folder, "zones.config")

        abs_state_path = os.path.abspath(cfg.CONF.state_path)

        LOG.debug("Getting ready to write zones.config at %s" % output_path)

        # NOTE(CapTofu): Might have to adapt this later on?
        url = self.get_url_data()
        utils.render_template_to_file(
            "mysql-bind9-config.jinja2",
            output_path,
            domains=domains,
            state_path=abs_state_path,
            dns_server_type=cfg.CONF[self.name].dns_server_type,
            dns_db_schema=url["database"],
            dns_db_table=cfg.CONF[self.name].database_dns_table,
            dns_db_host=url["host"],
            dns_db_user=url["username"],
            dns_db_password=url["password"],
        )

        # only do this if domain create, domain delete
        rndc_call = ["rndc", "-s", cfg.CONF[self.name].rndc_host, "-p", str(cfg.CONF[self.name].rndc_port)]

        # BUG FIX: the guards below test cfg.CONF[self.name].* but the values
        # appended previously came from self.config.*, a different attribute
        # source; read both from the same config group for consistency.
        if cfg.CONF[self.name].rndc_config_file:
            rndc_call.extend(["-c", cfg.CONF[self.name].rndc_config_file])

        if cfg.CONF[self.name].rndc_key_file:
            rndc_call.extend(["-k", cfg.CONF[self.name].rndc_key_file])

        rndc_call.extend(["reconfig"])

        utils.execute(*rndc_call)
Beispiel #11
0
class API_DB(object):
    """Persistence layer for the network API.

    Wraps a SqlSoup session over the tables created by createTables()
    (networks, ports, macs, bonds, flowspace, delegated_nets) and exposes
    read helpers plus CRUD methods that commit per call.
    """

    def __init__(self):
        # Create any tables that don't already exist
        self.createTables()

        # Cache SqlSoup table handles for each table we manage.
        self.db = SqlSoup(FLAGS.api_db_url)
        self.db_nets = self.db.networks
        self.db_ports = self.db.ports
        self.db_macs = self.db.macs
        self.db_bonds = self.db.bonds
        self.db_flowspace = self.db.flowspace
        self.db_net2slice = self.db.delegated_nets

    def createTables(self):
        """Create any missing tables and columns for the API schema.

        Idempotent: existing tables/columns are left untouched.
        """
        engine = create_engine(FLAGS.api_db_url)
        data = MetaData(bind=engine)
        data.reflect()
        existing_tables = data.tables.keys()

        # Dictionary for database description
        # Format: {Table object /w primary Column: [list of extra Column objects]}
        db_schema = {
            Table('networks', data,
                    Column('network_id', String(255), primary_key=True), keep_existing=True) :
                [],

            Table('ports', data,
                    Column('id', Integer, primary_key=True, autoincrement=True), keep_existing=True) :
                [   Column('port_num', Integer),
                    Column('datapath_id', String(255)),
                    Column('network_id', String(255)),
                    Column('bond_id', String(255))  ],

            Table('macs', data,
                    Column('mac_address', String(255), primary_key=True), keep_existing=True) :
                [   Column('network_id', String(255))   ],

            Table('bonds', data,
                    Column('bond_id', String(255), primary_key=True), keep_existing=True) :
                [   Column('datapath_id', String(255)),
                    Column('network_id', String(255))   ],

            Table('flowspace', data,
                    Column('id', Integer, primary_key=True), keep_existing=True) :
                [   Column('datapath_id', String(255)),
                    Column('port_num', Integer),
                    Column('mac_address', String(255))  ],

            Table('delegated_nets', data,
                    Column('network_id', String(255), primary_key=True), keep_existing=True) :
                [   Column('slice', String(255))    ],
        }

        for tab, colList in db_schema.items():
            # Create table if it doesn't exist
            if tab.name not in existing_tables:
                tab.create()

            # Check columns and update if necessary
            for col in colList:
                if col.name not in tab.c.keys():
                    col.create(tab, populate_default=True)

    def checkConnection(self):
        """Verify the DB session is alive; restart it once if dropped.

        Every public method calls this first so a timed-out connection
        (e.g. MySQL wait_timeout) is transparently re-established.
        """
        try:
            # Do a simple query
            self.db.execute("show databases")
        except sqlexc.OperationalError:
            # Connection was interrupted for some reason, try restarting session
            # NOTE(review): close() is called before rollback(); the usual
            # order is rollback-then-close -- confirm this is intentional.
            self.db.session.close()
            self.db.session.rollback()

            # Try simple query again
            self.db.execute("show databases")
        except:
            # Unknown exception, raise
            raise

    ###########################################################################
    # Functions for retrieving database contents
    ###########################################################################
    def getNetworks(self):
        """Return a list of all network_id values."""
        self.checkConnection()
        net_list = []
        for net in self.db_nets.all():
            net_list.append(net.network_id)

        return net_list

    def getPorts(self):
        """Return (network_id, datapath_id, port_num, bond_id) tuples."""
        self.checkConnection()
        port_list = []
        for port in self.db_ports.all():
            port_list.append((port.network_id, port.datapath_id, port.port_num, port.bond_id))

        return port_list

    def getMACs(self):
        """Return (network_id, mac_address) tuples."""
        self.checkConnection()
        mac_list = []
        for mac in self.db_macs.all():
            mac_list.append((mac.network_id, mac.mac_address))

        return mac_list

    def getBonds(self):
        """Return (bond_id, datapath_id, network_id) tuples."""
        self.checkConnection()
        bond_list = []
        for bond in self.db_bonds.all():
            bond_list.append((bond.bond_id, bond.datapath_id, bond.network_id))

        return bond_list

    def getFlowSpace(self):
        """Return (id, datapath_id, port_num, mac_address) tuples."""
        self.checkConnection()
        flowspace_list = []
        for flowspace in self.db_flowspace.all():
            flowspace_list.append((flowspace.id, flowspace.datapath_id, flowspace.port_num, flowspace.mac_address))

        return flowspace_list

    def getDelegatedNets(self):
        """Return (network_id, slice) tuples for delegated networks."""
        self.checkConnection()
        net2slice_list = []
        for net2slice in self.db_net2slice.all():
            net2slice_list.append((net2slice.network_id, net2slice.slice))

        return net2slice_list

    ###########################################################################
    # Functions for storing API calls into the database
    ###########################################################################
    def createNetwork(self, network_id, update=False):
        """Insert a network row; with update=True an existing row is OK.

        Raises NetworkAlreadyExist when the row exists and update is False.
        """
        self.checkConnection()
        if not self.db_nets.get(network_id):
            self.db_nets.insert(network_id=network_id)
        else:
            if not update:
                raise NetworkAlreadyExist(network_id=network_id)

        self.db.commit()

    def updateNetwork(self, network_id):
        """Idempotent variant of createNetwork."""
        self.createNetwork(network_id, True)

    def deleteNetwork(self, network_id):
        """Delete a network row; raises NetworkNotFound if absent."""
        self.checkConnection()
        entry = self.db_nets.get(network_id)
        if entry:
            self.db.delete(entry)
        else:
            raise NetworkNotFound(network_id=network_id)

        self.db.commit()

    def addMAC(self, network_id, mac):
        """Associate a MAC address with a network.

        Re-associating to the same network (or to NW_ID_EXTERNAL) is a
        no-op; moving from NW_ID_EXTERNAL to a concrete network is
        allowed; any other change raises MacAddressDuplicated.
        """
        self.checkConnection()
        entry = self.db_macs.get(mac)
        # Check for existing entry
        if not entry:
            self.db_macs.insert(network_id=network_id, mac_address=mac)
        else:
            if entry.network_id == network_id or network_id == NW_ID_EXTERNAL:
                # If old network and new network the same, do nothing
                # Or if trying to change an existing net association to NW_ID_EXTERNAL, do nothing
                return
            elif entry.network_id == NW_ID_EXTERNAL:
                # Allow changing from NW_ID_EXTERNAL to a known network UUID
                entry.network_id = network_id
            else:
                raise MacAddressDuplicated(mac=mac)

        try:
            self.db.commit()
        except sqlexc.IntegrityError:
            # Most likely due to race condition between setting DB change
            # and packet arriving at next switch triggering a DB read
            self.db.session.rollback()

    def delMAC(self, mac):
        """Remove a MAC row; raises MacAddressNotFound if absent."""
        self.checkConnection()
        entry = self.db_macs.get(mac)
        if entry:
            self.db.delete(entry)
        else:
            raise MacAddressNotFound(mac=mac)

        self.db.commit()

    def createPort(self, network_id, dpid, port_num, update=False):
        """Map a (dpid, port) pair to a network.

        Raises PortAlreadyExist when the pair is mapped and update is
        False; with update=True the mapping is overwritten.
        """
        self.checkConnection()
        # Check for existing entry
        # dpids are stored without leading zeros; normalize before lookup
        dpid = dpid.lstrip('0')
        params = and_(self.db_ports.datapath_id==dpid,
                        self.db_ports.port_num==port_num)
        old_entry = self.db_ports.filter(params).first()

        if not old_entry:
            # If updating but didn't locate existing entry, raise exception?
            # For now, just insert the entry and return success
            self.db_ports.insert(network_id=network_id,
                                    datapath_id=dpid, port_num=port_num)
        else:
            if update:
                old_entry.network_id = network_id
            else:
                # Entry already exists for (dpid,port) <=> network
                raise PortAlreadyExist(network_id=network_id,
                                        dpid=dpid, port=port_num)

        self.db.commit()

    def updatePort(self, network_id, dpid, port_num):
        """Idempotent variant of createPort."""
        self.createPort(network_id, dpid, port_num, True)

    def deletePort(self, network_id, dpid, port_num):
        """Delete a (dpid, port) mapping; raises PortNotFound if absent."""
        self.checkConnection()
        dpid = dpid.lstrip('0')
        params = and_(self.db_ports.datapath_id==dpid,
                        self.db_ports.port_num==port_num)
        entry = self.db_ports.filter(params).first()

        if entry:
            self.db.delete(entry)
        else:
            raise PortNotFound(network_id=network_id,
                                dpid=dpid, port=port_num)

        self.db.commit()

    def createBond(self, bond_id, dpid, network_id):
        """Create a bond row; raises BondAlreadyExist on duplicate id."""
        self.checkConnection()
        # Check for existing entry
        dpid = dpid.lstrip('0')
        if not self.db_bonds.get(bond_id):
            self.db_bonds.insert(bond_id=bond_id, datapath_id=dpid, network_id=network_id)
        else:
            raise BondAlreadyExist(bond_id=bond_id)

        self.db.commit()

    def deleteBond(self, bond_id):
        """Delete a bond and detach all ports bonded to it (no error if
        the bond does not exist)."""
        self.checkConnection()
        entry = self.db_bonds.get(bond_id)

        if entry:
            self.db.delete(entry)
        else:
            # Do nothing
            pass

        # Delete any ports currently bonded to the bond_id
        for port in self.db_ports.all():
            if port.bond_id == bond_id:
                port.bond_id = None

        self.db.commit()

    def addPort_bond(self, bond_id, port_num):
        """Attach a port to a bond.

        Raises BondNotFound for an unknown bond, PortNotFound for an
        unknown port, BondPortAlreadyBonded if already attached elsewhere.
        """
        self.checkConnection()
        bondEntry = self.db_bonds.get(bond_id)
        if bondEntry:
            dpid = bondEntry.datapath_id
            network_id = bondEntry.network_id
        else:
            raise BondNotFound(bond_id=bond_id)

        params = and_(self.db_ports.datapath_id==dpid,
                        self.db_ports.network_id==network_id,
                        self.db_ports.port_num==port_num)
        entry = self.db_ports.filter(params).first()

        if entry:
            # Check for existing entry
            old_bond_id = entry.bond_id
            if not old_bond_id:
                entry.bond_id = bond_id
            else:
                raise BondPortAlreadyBonded(port=port_num, bond_id=old_bond_id)
        else:
            raise PortNotFound(network_id=network_id,
                                dpid=dpid, port=port_num)

        self.db.commit()

    def deletePort_bond(self, bond_id, port_num):
        """Detach a port from a bond.

        Raises BondNotFound for an unknown bond, BondPortNotFound when
        the port is not attached to that bond.
        """
        self.checkConnection()
        bondEntry = self.db_bonds.get(bond_id)
        if bondEntry:
            dpid = bondEntry.datapath_id
        else:
            raise BondNotFound(bond_id=bond_id)

        params = and_(self.db_ports.datapath_id==dpid,
                        self.db_ports.port_num==port_num,
                        self.db_ports.bond_id==bond_id)
        entry = self.db_ports.filter(params).first()

        if entry:
            entry.bond_id = None
        else:
            raise BondPortNotFound(port=port_num, bond_id=bond_id)

        self.db.commit()

    def addFlowSpaceID(self, dpid, port_num, mac, id):
        """Record a flowspace id; raises FlowSpaceIDAlreadyExist on dup."""
        self.checkConnection()
        entry = self.db_flowspace.get(id)
        if not entry:
            self.db_flowspace.insert(id=id, datapath_id=dpid, port_num=port_num, mac_address=mac)
        else:
            raise FlowSpaceIDAlreadyExist(flowspace_id=id)

        self.db.commit()

    def delFlowSpaceID(self, id):
        """Delete a flowspace row (silently ignores a missing id)."""
        self.checkConnection()
        entry = self.db_flowspace.get(id)
        if entry:
            self.db.delete(entry)
        else:
            # Not found, raise exception?
            pass

        self.db.commit()

    def assignNetToSlice(self, sliceName, network_id):
        """Delegate a network to a slice; raises NetworkAlreadyAssigned
        if the network is already delegated."""
        self.checkConnection()
        entry = self.db_net2slice.get(network_id)
        if not entry:
            self.db_net2slice.insert(network_id=network_id, slice=sliceName)
        else:
            raise NetworkAlreadyAssigned(network_id=network_id, sliceName=entry.slice)

        self.db.commit()

    def removeNetFromSlice(self, network_id):
        """Remove a network's delegation (silently ignores a missing row)."""
        self.checkConnection()
        entry = self.db_net2slice.get(network_id)
        if entry:
            self.db.delete(entry)
        else:
            # Not found, raise exception?
            pass

        self.db.commit()
Beispiel #12
0
        try:
            archive_dir_path = os.path.dirname(msg_archive_path)
            if not os.path.exists(archive_dir_path):
                mk_dir(archive_dir_path)

            if archive_vol.compress_blobs == 1 and \
                    mail_item.size > archive_vol.compression_threshold:
                copy_compress(msg_path, msg_archive_path)
                print 'zipped',
            else:
                shutil.move(msg_path, msg_archive_path)

            set_perm(msg_archive_path)

            mail_item.volume_id = archive_vol.id
            mboxgroup_db.commit()

        except IOError, e:
            print 'ERROR:', e
            continue

        print 'OK'
        archived_msgs += 1

    current_time = datetime.now()
    diff_time = current_time - start_time
    if diff_time.seconds > xtime:
        print "Stopping archiving, time excedded."
        break

    i += 1
Beispiel #13
0
class DB:
    ##### INSTANTIATION
    def __init__(self, data_schema, db_url, metadata_table_name, scraper=None):
        """Build the table schema from *data_schema*, create tables if
        missing, and open a SqlSoup session against *db_url*.

        :param data_schema: module/object providing their_fields,
            resolutions and table_name
        :param db_url: SQLAlchemy database URL
        :param metadata_table_name: name of the metadata table to bind
        :param scraper: optional scraper object, stored as-is
        """
        self.scraper = scraper
        # deep-copy so mutations below don't leak back into data_schema
        self.their_fields = copy.deepcopy(data_schema.their_fields)
        self.resolutions = data_schema.resolutions
        self.db_url = db_url

        # Columns we add on top of the scraped fields (bookkeeping).
        self.our_fields = {
            'page_permalink': {
                'column': Column(String(1000, convert_unicode=True)),
            },
            'access_time': {
                'column': Column(Integer),
            },
            'doesnt_exist': {
                'column': Column(Boolean),
            },
            'we_couldnt_parse_it': {
                'column': Column(Boolean),
            },
            #'is_color = Column(Boolean)
        }

        # Per-resolution columns: download status, source URL, too-big flag.
        resolutions_columns = []
        for resolution, data in self.resolutions.items():
            resolutions_columns.append((data['status_column_name'], {
                'column':
                Column(Boolean, default=False)
            }))
            resolutions_columns.append((data['url_column_name'], {
                'column':
                Column(String(1000, convert_unicode=True))
            }))
            resolutions_columns.append((data['too_big_column_name'], {
                'column':
                Column(Boolean, default=False)
            }))
        self.our_fields.update(dict(resolutions_columns))

        # Map schema type names to SQLAlchemy Column objects.
        def column_type_to_column_obj(type):
            if type == 'string':
                return Column(Text(9000, convert_unicode=True))
            else:
                print "what the heck kind of type is that?!?!?!?"

        for index in self.their_fields.keys():
            self.their_fields[index]['column'] = column_type_to_column_obj(
                self.their_fields[index]['type'])

        ## glue all of the fields together
        self.all_fields = dict(self.their_fields.items() +
                               self.our_fields.items())

        ## generate the metadata class
        self.base = declarative_base()

        class OurMetadata(self.base):
            __tablename__ = data_schema.table_name
            id = Column(Integer, primary_key=True)

        # Attach every column to the declarative class dynamically.
        for fieldname, fieldinfo in self.all_fields.items():
            setattr(OurMetadata, fieldname, fieldinfo['column'])

        ## create the db
        #self.db = SqlSoup(db_url + '?charset=utf8&use_unicode=0', expire_on_commit=True)
        from sqlalchemy.orm import scoped_session, sessionmaker
        self.db = SqlSoup(db_url + '?charset=utf8&use_unicode=0',
                          session=scoped_session(
                              sessionmaker(expire_on_commit=True)))
        self.db.engine.raw_connection().connection.text_factory = unicode

        # make the tables if they don't already exist
        self.base.metadata.create_all(self.db.engine)
        self.db.commit()

        # make it easier to grab metadata table object
        self.metadata_table = getattr(self.db, metadata_table_name)
        if not self.metadata_table:
            print "crap, something has gone really wrong. couldn't grab the metadata table"

        #TODO: i think that maybe i can remove this. but not sure. probs need for sqlite.
        self.db_lock = threading.Lock()

    ### SERIALIZATION

    # serialize data before putting it in the database
    def prep_data_for_insertion(self, data_dict):
        """JSON-encode fields marked 'serialize' before a DB write.

        Mutates and returns *data_dict*; falsy input is returned as-is.
        """
        if not data_dict:
            return data_dict
        for key in data_dict.keys():
            spec = self.all_fields.get(key)
            if spec and spec.get('serialize'):
                data_dict[key] = json.dumps(data_dict[key])
        return data_dict

    # de-serialize and decode to unicode the data after pulling it from the database
    def re_objectify_data(self, data_dict):
        """Inverse of prep_data_for_insertion for a row pulled from the DB.

        Coerces 'id' to int, JSON-decodes fields marked 'serialize', and
        unicode-decodes everything else.  Mutates and returns *data_dict*;
        falsy input is returned as-is.
        """
        if not data_dict:
            return data_dict
        data_dict['id'] = int(data_dict['id'])
        for key in data_dict.keys():
            spec = self.all_fields.get(key)
            if spec and spec.get('serialize'):
                if data_dict[key]:
                    data_dict[key] = json.loads(data_dict[key])
            else:
                data_dict[key] = to_unicode_or_bust(data_dict[key])
        return data_dict

    ##### READ-ONLY OPERATIONS

    ### BASE: Actually grab things from the database.
    ### many of the below functions use these
    # NOTE: careful about using this directly. it doesn't "uncompress" the data after pulling it from the db
    def get_image_metadata(self, id):
        """Fetch the raw metadata row for *id* -- no deserialization."""
        row = self.metadata_table.get(id)
        return row

    def get_image_metadata_dict(self, id):
        """Return the row for *id* as a plain deserialized dict, or None
        when no such row exists."""
        row = self.get_image_metadata(id)
        if not row:
            return None
        # Copy __dict__ so we mutate a plain dict, not the mapped object.
        data = self.re_objectify_data(dict(row.__dict__))
        # Drop SQLAlchemy's internal bookkeeping entry.
        data.pop('_sa_instance_state')
        return data

    def get_resolution_url_column_name(self, resolution):
        """Name of the column holding the source URL for *resolution*."""
        info = self.resolutions[resolution]
        return info['url_column_name']

    def get_resolution_url_column(self, resolution):
        """SqlSoup attribute for *resolution*'s URL column."""
        return getattr(self.db, self.get_resolution_url_column_name(resolution))

    def get_resolution_url(self, resolution, id):
        """URL stored for image *id* at *resolution*."""
        attr = self.get_resolution_url_column_name(resolution)
        return getattr(self.metadata_table.get(id), attr)

    #TODO: pretty sure these are the same function
    def get_resolution_image_url(self, id, resolution):
        """URL stored for image *id* at *resolution* (same lookup as
        get_resolution_url, argument order swapped)."""
        attr = self.resolutions[resolution]['url_column_name']
        row = self.metadata_table.get(id)
        return getattr(row, attr)

    def get_resolution_status_column_name(self, resolution):
        """Name of the boolean download-status column for *resolution*."""
        info = self.resolutions[resolution]
        return info['status_column_name']

    def get_resolution_status_column(self, resolution):
        """Status column attribute on the metadata table for *resolution*."""
        name = self.get_resolution_status_column_name(resolution)
        return getattr(self.metadata_table, name)

    def get_resolution_status(self, id, resolution):
        """Download-status flag for image *id* at *resolution* (None if
        the column is absent from the row dict)."""
        row = self.get_image_metadata_dict(id)
        return row.get(self.get_resolution_status_column_name(resolution))

    def get_resolution_too_big_column_name(self, resolution):
        """Name of the too-big flag column for *resolution*."""
        info = self.resolutions[resolution]
        return info['too_big_column_name']

    def get_resolution_too_big_column(self, resolution):
        """Too-big column attribute on the metadata table."""
        name = self.get_resolution_too_big_column_name(resolution)
        return getattr(self.metadata_table, name)

    def get_is_marked_as_too_big(self, id, resolution):
        """True when image *id* is flagged too-big at *resolution*."""
        row = self.get_image_metadata_dict(id)
        flag = row[self.get_resolution_too_big_column_name(resolution)]
        return bool(flag)

    def get_valid_images(self):
        """Query for rows where at least one resolution was downloaded
        successfully (any status column is True)."""
        clauses = [
            self.get_resolution_status_column(res) == True
            for res in self.resolutions.keys()
        ]
        return self.metadata_table.filter(sqlalchemy.or_(*clauses))

    def get_next_successful_image_id(self, id):
        """Smallest valid image id greater than *id*, or *id* itself when
        there is none."""
        match = self.get_valid_images().filter(
            self.metadata_table.id > id).first()
        if match is None:
            return id
        return int(match.id)

    def get_prev_successful_image_id(self, id):
        """Largest valid image id smaller than *id*, or *id* itself when
        there is none."""
        candidates = self.get_valid_images().filter(
            self.metadata_table.id < id)
        match = candidates.order_by(
            sqlalchemy.desc(self.metadata_table.id)).first()
        if match is None:
            return id
        return int(match.id)

    ## input: resolution, as a string (hires, lores, thumb)
    ## returns: list of tuples in form: (id, url)
    def get_set_images_to_dl(self, resolution):
        """Return (id, url) tuples for images still to download at
        *resolution* -- rows whose status flag is False or NULL.

        Tuples with a falsy id or url are dropped.
        """
        status_col = self.get_resolution_status_column(resolution)
        pending = self.metadata_table.filter(
            sqlalchemy.or_(status_col == False, status_col == None)).all()
        url_attr = self.get_resolution_url_column_name(resolution)
        pairs = []
        for row in pending:
            # Re-fetch by id (matches original behavior) to read the URL.
            url = getattr(self.metadata_table.get(row.id), url_attr)
            if row.id and url:
                pairs.append((row.id, url))
        return pairs

    def get_highest_id_in_our_db(self):
        """Return the largest image id stored, or 1 for an empty table.

        first() returns None on an empty table, so the .id access raises
        AttributeError; any failure falls back to id 1. The except clause
        is narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        are no longer swallowed.
        """
        try:
            newest = self.metadata_table.order_by(
                sqlalchemy.desc(self.metadata_table.id)).first()
            return int(newest.id)
        except Exception:
            return 1

    def get_random_valid_image_id(self):
        """Return the id of one uniformly-chosen valid image."""
        candidates = self.get_valid_images()
        pick = random.randrange(candidates.count())
        return candidates[pick].id

    def get_num_images(self):
        """Count rows we managed to parse (we_couldnt_parse_it False or NULL)."""
        # NOTE: filtering by != True returned 0 rows here (reason unknown),
        # hence the explicit False-or-NULL disjunction.
        parse_ok = sqlalchemy.or_(
            self.metadata_table.we_couldnt_parse_it == False,
            self.metadata_table.we_couldnt_parse_it == None)
        return self.metadata_table.filter(parse_ok).count()

    ##### WRITE-ONLY OPERATIONS

    ### BASE: Actually insert or update a row in the database
    ### many of the below functions use these
    #NOTE: this only works if the primary key is 'id'
    def insert_or_update_table_row(self, table, new_data_dict):
        """Upsert *new_data_dict* into *table*, keyed on its 'id' entry.

        An existing row is merged field-by-field (new values win) and then
        replaced via delete + commit + insert rather than an UPDATE.
        Returns False for an empty dict, otherwise None.
        """
        if not new_data_dict:
            print "you're trying to insert a blank dict. that's pretty lame."
            return False
        # merge the new and the old into a fresh dict
        existing_row = table.get(new_data_dict['id'])
        if existing_row:
            # NOTE(review): existing_row.__dict__ also carries SQLAlchemy's
            # '_sa_instance_state' key, which therefore ends up in the
            # insert(**...) kwargs below -- presumably SqlSoup tolerates
            # that; confirm.
            existing_row_data_dict = existing_row.__dict__
            final_row_data_dict = existing_row_data_dict
            for key, value in new_data_dict.items():
                final_row_data_dict[key] = value
            #write over the current row contents with it
            #with self.db_lock:
            self.db.delete(existing_row)
            self.db.commit()
        else:
            final_row_data_dict = new_data_dict
        #with self.db_lock:
        table.insert(**final_row_data_dict)
        self.db.commit()

    def store_metadata_row(self, metadata_dict):
        """Serialize *metadata_dict* and insert-or-update its metadata row.

        Defaults we_couldnt_parse_it to 0 when the caller didn't set it.
        """
        # dict.has_key() is deprecated (and gone in Python 3); 'in' works in both.
        if 'we_couldnt_parse_it' not in metadata_dict:
            metadata_dict['we_couldnt_parse_it'] = 0
        metadata_dict = self.prep_data_for_insertion(metadata_dict)
        self.insert_or_update_table_row(self.metadata_table, metadata_dict)

    def mark_img_as_not_downloaded(self, id, resolution):
        """Clear the downloaded flag for image *id* at *resolution*."""
        flag_column = self.get_resolution_status_column_name(resolution)
        self.store_metadata_row({'id': id, flag_column: False})

    def mark_img_as_downloaded(self, id, resolution):
        """Set the downloaded flag for image *id* at *resolution*."""
        flag_column = self.get_resolution_status_column_name(resolution)
        self.store_metadata_row({'id': id, flag_column: True})

    def mark_img_as_too_big(self, id, resolution):
        """Flag image *id* as too big to download at *resolution*."""
        flag_column = self.get_resolution_too_big_column_name(resolution)
        self.store_metadata_row({'id': id, flag_column: True})

    # DELETE EVERYTHING. CAREFUL!
    def truncate_all_tables(self):
        print "================================"
        print "LIKE SERIOUSLY I AM ABOUT TO DELETE ALL THE TABLES RIGHT NOW OH BOY"
        print self.db_url
        print "================================"
        meta = MetaData(self.db.engine)
        meta.reflect()
        meta.drop_all()
        meta.create_all()
        '''
        for table in reversed(self.base.metadata.sorted_tables):
            print table
            table.delete()
            self.db.commit()
        self.base.metadata.create_all(self.db.engine)
        self.db.commit()
        '''

    ### HELPERS

    def get_field_key_by_full_name(self, full_name):
        """Reverse-lookup a field key from its human-readable full_name.

        Returns False when no field matches (fields without a full_name
        are never matched).
        """
        matches = (key for key, info in self.their_fields.items()
                   if info['full_name'] and info['full_name'] == full_name)
        return next(matches, False)

    ##### OTHER
    def repr_as_html(self, image_as_dict,
                     image_resolution_to_local_file_location_fxn):
        """Render one image's metadata as an HTML page via a Django template.

        image_as_dict: a metadata row as returned by
            get_image_metadata_dict(); it is mutated in place with the
            extra template fields built below. Falsy input yields u"".
        image_resolution_to_local_file_location_fxn: callback mapping a
            resolution name to the local file location used in the page.
        """
        if not image_as_dict:
            return u""
        # NOTE(review): floorified and id_zfilled are computed but never
        # used below -- presumably leftovers; confirm before removing.
        floorified = usable_image_scraper.scraper.floorify(image_as_dict['id'])
        id_zfilled = str(image_as_dict['id']).zfill(5)
        image_urls = {}
        for resolution in self.resolutions:
            image_urls[
                resolution] = image_resolution_to_local_file_location_fxn(
                    resolution)

        # add link rel=license
        #image_as_dict['copyright'] = image_as_dict['copyright'].strip("'").replace('None', '<a href="http://creativecommons.org/licenses/publicdomain/" rel="license">None</a>')

        # prev/next links assume adjacent ids exist (no validity check here)
        image_as_dict['next_id'] = int(image_as_dict['id']) + 1
        image_as_dict['prev_id'] = int(image_as_dict['id']) - 1

        # build one <p class="datapoint"> block per populated scraped field
        image_as_dict['their_data'] = u''
        for key, data in self.their_fields.items():
            if not key in image_as_dict or not image_as_dict[key]:
                continue
            html_block = '<p class="datapoint">'
            # if there's a pre-perscribed way to represent this field:
            html_block += '<label for="' + key + '">' + self.their_fields[key][
                'full_name'] + ': </label>'
            rdfa_clause = ''
            if 'dc_mapping' in data:
                rdfa_clause = ' property="' + data['dc_mapping'] + '"'
            if 'repr_as_html' in data:
                html_block += data['repr_as_html'](image_as_dict[key])
            # if not:
            else:
                html_block += '<span id="' + key + '"' + rdfa_clause + '>' + unicode(
                    image_as_dict[key]) + '</span>'
            html_block += '</p>'
            image_as_dict['their_data'] += html_block

        def get_template_str():
            # load django_template.html from this module's own directory
            template_file = 'django_template.html'
            path = os.path.dirname(__file__)
            relpath = os.path.relpath(path)
            template_relpath = relpath + '/' + template_file
            fp = open(template_relpath, 'r')
            template_as_str = fp.read()
            return template_as_str

        # the table of image downloads
        image_as_dict['download_links'] = u'<table id="download_links">'
        for resolution, data in self.scraper.resolutions.items():
            image_as_dict['download_links'] += u'<tr>'
            image_as_dict['download_links'] += u'<td>' + resolution + ':</td>'
            orig_url = self.get_resolution_url(resolution, image_as_dict['id'])
            #image_as_dict['download_links'] += u'<td><a href="' + orig_url + '">' + self.scraper.abbrev.upper() + '</a></td>'
            image_as_dict[
                'download_links'] += u'<td><a href="' + orig_url + '">Original</a></td>'
            # if we've downloaded the image
            if self.get_resolution_status(image_as_dict['id'], resolution):
                our_url = self.scraper.get_web_resolution_local_image_location(
                    resolution, image_as_dict['id'], remote_url=orig_url)
                image_as_dict[
                    'download_links'] += u'<td><a href="' + our_url + '">Usable Image Mirror</a></td>'
            else:
                image_as_dict['download_links'] += u'<td></td>'
        image_as_dict['download_links'] += u'</table>'

        template_str = get_template_str()
        template = Template(template_str)
        context = Context({'image': image_as_dict, 'image_urls': image_urls})
        html = template.render(context)
        return html
Beispiel #14
0
class Upgrade:
    """Upgrade the application and its database to the packaged versions.

    Sanity-checks the four version numbers involved, then repeatedly runs
    the upgrade_scripts step functions until neither the database nor the
    application needs further work.
    """

    def __init__(self, engine):
        # private SqlSoup handle onto the engine being upgraded
        self.__db = SqlSoup(engine)

    def process(self):
        """Main function that runs the update process."""
        #_upgrade_system_info_structure()
        old_app_version = self.get_current_app_version()
        old_db_version = self.get_current_db_version()
        print("=== Upgrade process")
        print("\t> Current version (application : %s, database = %s)"
              % (old_app_version, old_db_version))
        print("\t> New version (application : %s, database = %s)"
              % (self.get_new_app_version(), self.get_new_db_version()))
        self._sanity_check_before_upgrade()
        # Database upgrade: each db_upgrade() call applies one step and
        # returns truthy while there is more to do.
        while upgrade_scripts.db_upgrade(self):
            pass
        if old_db_version == self.get_current_db_version():
            print("\tThe database was NOT upgraded: nothing to do!")

        # Application upgrade (same one-step-at-a-time contract)
        while upgrade_scripts.app_upgrade(self):
            pass
        if old_app_version == self.get_current_app_version():
            print("\tThe application was NOT upgraded: nothing to do!")

        print("=== Upgrade process terminated")

    def set_version(self, app_version, db_version):
        """Set the version of the application and the database."""
        self.update_app_version(app_version)
        self.update_db_version(db_version)
        self.commit()

    def commit(self):
        """Flush pending version updates to the database."""
        self.__db.commit()

    #####################
    # Utility functions #
    #####################

    def _sanity_check_before_upgrade(self):
        """Check that the upgrade process can be run; abort if not."""
        # We use NormalizedVersion objects so comparisons are meaningful
        new_db_version = self.get_new_db_version()
        new_app_version = self.get_new_app_version()
        current_db_version = self.get_current_db_version()
        current_app_version = self.get_current_app_version()

        if new_db_version > new_app_version:
            print("Internal error")
            print("The new database version number (%s) can't be superior to the application one (%s)"
                  % (new_db_version, new_app_version))
            self._abort_upgrade_process()

        if current_db_version > new_db_version:
            print("Something is wrong with your installation:")
            print("Your database version number (%s) is superior to the one you're trying to install (%s)"
                  % (current_db_version, new_db_version))
            self._abort_upgrade_process()

        if current_app_version > new_app_version:
            print("Something is wrong with your installation:")
            print("Your application version number (%s) is superior to the one you're trying to install (%s)"
                  % (current_app_version, new_app_version))
            self._abort_upgrade_process()

        if current_db_version > current_app_version:
            print("Something is wrong with your installation:")
            print("Your database version number (%s) is superior to the application one (%s)"
                  % (current_db_version, current_app_version))
            self._abort_upgrade_process()

    def get_current_db_version(self):
        """Return the current version of the database."""
        db_version = self._sql_execute("SELECT db_version FROM core_system_info").fetchone()[0]
        if db_version is None or db_version == '':
            # Should only happen for the first upgrade using this script
            return NormalizedVersion('0.1.0')
        else:
            return NormalizedVersion(self._suggest_normalized_version(db_version))

    def get_new_db_version(self):
        """Return the version of the database we should upgrade to (normalized version)."""
        return NormalizedVersion(self._suggest_normalized_version(DB_VERSION))

    def update_db_version(self, db_version):
        """Update the version of the database."""
        # NOTE(review): version strings are interpolated into the SQL;
        # safe only because they come from our own normalized versions.
        if self._sql_execute("SELECT db_version FROM core_system_info").fetchone() is None:
            sql = "INSERT INTO core_system_info (db_version) VALUES('%s')" % db_version
        else:
            sql = "UPDATE core_system_info SET db_version='%s'" % db_version
        self._sql_execute(sql)

    def get_current_app_version(self):
        """Return the current version of the application."""
        try:
            app_version = self._sql_execute("SELECT app_version FROM core_system_info").fetchone()[0]
            # Should only happen if the 'app_version' column doesn't exist (first application upgrade using this script)
            if app_version is None or app_version == '':
                app_version = NormalizedVersion('0.1.0')
            return NormalizedVersion(self._suggest_normalized_version(app_version))
        except Exception:
            return NormalizedVersion('0.1.0')

    def get_new_app_version(self):
        """Return the version of the application we should upgrade to (normalized version)."""
        return NormalizedVersion(self._suggest_normalized_version(__version__))

    def update_app_version(self, app_version):
        """Update the version of the application."""
        if self._sql_execute("SELECT app_version FROM core_system_info").fetchone() is None:
            sql = "INSERT INTO core_system_info (app_version) VALUES('%s')" % app_version
        else:
            sql = "UPDATE core_system_info SET app_version='%s'" % app_version
        self._sql_execute(sql)

    def _suggest_normalized_version(self, version):
        """Normalize *version*, aborting the upgrade on an invalid number."""
        n_version = suggest_normalized_version(version)
        if n_version is None:
            print("Error : invalid version number : %s" % version)
            print("See : http://wiki.domogik.org/Release_numbering")
            # BUGFIX: used to call self._abort_install_process(), which is
            # not defined on this class and raised AttributeError instead
            # of aborting cleanly.
            self._abort_upgrade_process()
        else:
            return n_version

    def _sql_execute(self, sql_code):
        """Run raw SQL against the upgrade database handle."""
        return self.__db.execute(sql_code)

    def _abort_upgrade_process(self, message=""):
        """Print *message* and terminate the process with exit code 1."""
        print("Upgrade process aborted : %s" % message)
        sys.exit(1)
Beispiel #15
0
class DB:
    """SqlSoup-backed store for scraped image metadata.

    Builds a declarative metadata table whose columns are the scraper's
    own fields (data_schema.their_fields) plus bookkeeping columns we
    add: a permalink, access time, existence/parse flags, and a
    status / url / too-big column triple per resolution.

    Note: single-argument print statements have been parenthesized so the
    module is syntactically valid on both Python 2 and 3 (matching the
    Upgrade class's print() style).
    """

    ##### INSTANTIATION
    def __init__(self, data_schema, db_url, metadata_table_name, scraper=None):
        self.scraper = scraper
        # deep copy: we attach Column objects to these field dicts below
        self.their_fields = copy.deepcopy(data_schema.their_fields)
        self.resolutions = data_schema.resolutions
        self.db_url = db_url

        # bookkeeping columns we maintain on top of the scraped fields
        self.our_fields = {
            "page_permalink": {"column": Column(String(1000, convert_unicode=True))},
            "access_time": {"column": Column(Integer)},
            "doesnt_exist": {"column": Column(Boolean)},
            "we_couldnt_parse_it": {"column": Column(Boolean)},
            #'is_color = Column(Boolean)
        }

        # one status / url / too-big column triple per resolution
        resolutions_columns = []
        for resolution, data in self.resolutions.items():
            resolutions_columns.append((data["status_column_name"], {"column": Column(Boolean, default=False)}))
            resolutions_columns.append(
                (data["url_column_name"], {"column": Column(String(1000, convert_unicode=True))})
            )
            resolutions_columns.append((data["too_big_column_name"], {"column": Column(Boolean, default=False)}))
        self.our_fields.update(dict(resolutions_columns))

        def column_type_to_column_obj(type_name):
            # parameter renamed from 'type' to avoid shadowing the builtin
            if type_name == "string":
                return Column(Text(9000, convert_unicode=True))
            else:
                print("what the heck kind of type is that?!?!?!?")

        for index in self.their_fields.keys():
            self.their_fields[index]["column"] = column_type_to_column_obj(self.their_fields[index]["type"])

        ## glue all of the fields together (our_fields wins on collision,
        ## same as the old dict(items + items) concatenation did)
        self.all_fields = dict(self.their_fields, **self.our_fields)

        ## generate the metadata class
        self.base = declarative_base()

        class OurMetadata(self.base):
            __tablename__ = data_schema.table_name
            id = Column(Integer, primary_key=True)

        for fieldname, fieldinfo in self.all_fields.items():
            setattr(OurMetadata, fieldname, fieldinfo["column"])

        ## create the db
        # self.db = SqlSoup(db_url + '?charset=utf8&use_unicode=0', expire_on_commit=True)
        from sqlalchemy.orm import scoped_session, sessionmaker

        self.db = SqlSoup(
            db_url + "?charset=utf8&use_unicode=0", session=scoped_session(sessionmaker(expire_on_commit=True))
        )
        self.db.engine.raw_connection().connection.text_factory = unicode

        # make the tables if they don't already exist
        self.base.metadata.create_all(self.db.engine)
        self.db.commit()

        # make it easier to grab metadata table object
        self.metadata_table = getattr(self.db, metadata_table_name)
        if not self.metadata_table:
            print("crap, something has gone really wrong. couldn't grab the metadata table")

        # TODO: i think that maybe i can remove this. but not sure. probs need for sqlite.
        self.db_lock = threading.Lock()

    ### SERIALIZATION

    # serialize data before putting it in the database
    def prep_data_for_insertion(self, data_dict):
        """JSON-encode every field whose schema entry asks for 'serialize'."""
        if not data_dict:
            return data_dict
        for key, data in data_dict.items():
            if key in self.all_fields and "serialize" in self.all_fields[key] and self.all_fields[key]["serialize"]:
                data_dict[key] = json.dumps(data_dict[key])
        return data_dict

    # de-serialize and decode to unicode the data after pulling it from the database
    def re_objectify_data(self, data_dict):
        """Inverse of prep_data_for_insertion: JSON-decode serialized fields,
        coerce id to int, and decode everything else to unicode."""
        if not data_dict:
            return data_dict
        data_dict["id"] = int(data_dict["id"])
        for key, data in data_dict.items():
            if key in self.all_fields and "serialize" in self.all_fields[key] and self.all_fields[key]["serialize"]:
                if data_dict[key]:
                    data_dict[key] = json.loads(data_dict[key])
            else:
                data_dict[key] = to_unicode_or_bust(data_dict[key])
        return data_dict

    ##### READ-ONLY OPERATIONS

    ### BASE: Actually grab things from the database.
    ### many of the below functions use these
    # NOTE: careful about using this directly. it doesn't "uncompress" the data after pulling it from the db
    def get_image_metadata(self, id):
        """Return the raw row object for *id* (no deserialization)."""
        # with self.db_lock:
        return self.metadata_table.get(id)

    def get_image_metadata_dict(self, id):
        """Return the row for *id* as a plain deserialized dict, or None."""
        # we run this through dict() so that we're manipulating a copy, not the actual object, which it turns out is cached or something
        row = self.get_image_metadata(id)
        if not row:
            return None
        row_dict = dict(row.__dict__)
        objectified_dict = self.re_objectify_data(row_dict)
        del objectified_dict["_sa_instance_state"]  # sqlalchemy throws this sucker in. dont want it.
        return objectified_dict

    def get_resolution_url_column_name(self, resolution):
        """Name of the column storing *resolution*'s source url."""
        return self.resolutions[resolution]["url_column_name"]

    def get_resolution_url_column(self, resolution):
        """Column object for *resolution*'s source url.

        NOTE(review): this reads the attribute off self.db rather than
        self.metadata_table (unlike the status/too-big twins) -- looks
        suspicious but is preserved; confirm against callers.
        """
        column_name = self.get_resolution_url_column_name(resolution)
        return getattr(self.db, column_name)

    def get_resolution_url(self, resolution, id):
        """Source url of image *id* at *resolution*."""
        row = self.metadata_table.get(id)
        url_column_name = self.get_resolution_url_column_name(resolution)
        return getattr(row, url_column_name)

    # TODO: pretty sure these are the same function
    def get_resolution_image_url(self, id, resolution):
        """Source url of image *id* at *resolution* (duplicate of the above)."""
        metadata_url_column_name = self.resolutions[resolution]["url_column_name"]
        url = getattr(self.metadata_table.get(id), metadata_url_column_name)
        return url

    def get_resolution_status_column_name(self, resolution):
        """Name of the column storing *resolution*'s downloaded flag."""
        return self.resolutions[resolution]["status_column_name"]

    def get_resolution_status_column(self, resolution):
        """Column object for *resolution*'s downloaded flag."""
        the_status_column_name = self.get_resolution_status_column_name(resolution)
        the_status_column = getattr(self.metadata_table, the_status_column_name)
        return the_status_column

    def get_resolution_status(self, id, resolution):
        """Downloaded flag of image *id* at *resolution* (None if unset)."""
        row_dict = self.get_image_metadata_dict(id)
        column_name = self.get_resolution_status_column_name(resolution)
        return row_dict.get(column_name)

    def get_resolution_too_big_column_name(self, resolution):
        """Name of the column storing *resolution*'s 'too big' flag."""
        return self.resolutions[resolution]["too_big_column_name"]

    def get_resolution_too_big_column(self, resolution):
        """Column object for *resolution*'s 'too big' flag."""
        column_name = self.get_resolution_too_big_column_name(resolution)
        column = getattr(self.metadata_table, column_name)
        return column

    def get_is_marked_as_too_big(self, id, resolution):
        """True if image *id* is flagged as too big at *resolution*.

        Missing rows or a missing/NULL flag count as "not too big" (reads
        via .get, matching get_resolution_status, instead of indexing).
        """
        row_dict = self.get_image_metadata_dict(id)
        if not row_dict:
            return False
        too_big_column_name = self.get_resolution_too_big_column_name(resolution)
        return bool(row_dict.get(too_big_column_name))

    def get_valid_images(self):
        """Query for rows downloaded OK at one or more resolutions."""
        # `== True` is deliberate: these are SQLAlchemy column expressions
        criteria = []
        for resolution in self.resolutions.keys():
            criteria.append(self.get_resolution_status_column(resolution) == True)
        where = sqlalchemy.or_(*criteria)
        return self.metadata_table.filter(where)

    def get_next_successful_image_id(self, id):
        """Id of the nearest valid image above *id*, or *id* if none."""
        where = self.metadata_table.id > id
        higher_id = self.get_valid_images().filter(where).first()
        if not higher_id:
            return id
        return int(higher_id.id)

    def get_prev_successful_image_id(self, id):
        """Id of the nearest valid image below *id*, or *id* if none."""
        where = self.metadata_table.id < id
        lower_id = self.get_valid_images().filter(where).order_by(sqlalchemy.desc(self.metadata_table.id)).first()
        if not lower_id:
            return id
        return int(lower_id.id)

    ## input: resolution, as a string (hires, lores, thumb)
    ## returns: list of tuples in form: (id, url)
    def get_set_images_to_dl(self, resolution):
        """List (id, url) pairs still to download at *resolution*.

        Candidates are rows whose status flag is False or NULL; pairs with
        a missing id or url are dropped.
        """
        the_status_column = self.get_resolution_status_column(resolution)
        where = sqlalchemy.or_(the_status_column == False, the_status_column == None)
        # rows_to_dl = self.metadata_table.filter(where).filter(sqlalchemy.not_(self.get_resolution_too_big_column(resolution) == True)).all()
        rows_to_dl = self.metadata_table.filter(where).all()
        metadata_url_column_name = self.get_resolution_url_column_name(resolution)
        # read the url straight off each fetched row instead of re-querying
        # the table per id (the old map() over .get(id) was an N+1 query)
        tuples = [(row.id, getattr(row, metadata_url_column_name)) for row in rows_to_dl]
        # throw away tuples that have a null value in either position
        # TODO: maybe we should throw an exception here?
        tuples = [pair for pair in tuples if pair[0] and pair[1]]
        return tuples

    def get_highest_id_in_our_db(self):
        """Largest image id stored, or 1 for an empty table.

        first() returns None on an empty table, so .id raises; the except
        clause is narrowed from a bare `except:` so SystemExit and
        KeyboardInterrupt are no longer swallowed.
        """
        try:
            newest = self.metadata_table.order_by(sqlalchemy.desc(self.metadata_table.id)).first()
            return int(newest.id)
        except Exception:
            return 1

    def get_random_valid_image_id(self):
        """Id of one uniformly-chosen valid image."""
        possibilities = self.get_valid_images()
        num_possibilities = possibilities.count()
        choice = random.randrange(num_possibilities)
        return possibilities[choice].id

    def get_num_images(self):
        """Count rows we managed to parse (we_couldnt_parse_it False or NULL)."""
        # NOTE: filtering by != True returned 0 rows here (reason unknown),
        # hence the explicit False-or-NULL disjunction.
        mywhere = sqlalchemy.or_(
            self.metadata_table.we_couldnt_parse_it == False, self.metadata_table.we_couldnt_parse_it == None
        )
        return self.metadata_table.filter(mywhere).count()

    ##### WRITE-ONLY OPERATIONS

    ### BASE: Actually insert or update a row in the database
    ### many of the below functions use these
    # NOTE: this only works if the primary key is 'id'
    def insert_or_update_table_row(self, table, new_data_dict):
        """Upsert *new_data_dict* into *table*, keyed on its 'id' entry.

        Existing rows are merged field-by-field (new values win), then
        replaced via delete + commit + insert rather than an UPDATE.
        Returns False for an empty dict, otherwise None.
        """
        if not new_data_dict:
            print("you're trying to insert a blank dict. that's pretty lame.")
            return False
        # merge the new and the old into a fresh dict
        existing_row = table.get(new_data_dict["id"])
        if existing_row:
            # NOTE(review): existing_row.__dict__ also carries SQLAlchemy's
            # '_sa_instance_state' key, which ends up in the insert kwargs --
            # presumably SqlSoup tolerates it; confirm.
            existing_row_data_dict = existing_row.__dict__
            final_row_data_dict = existing_row_data_dict
            for key, value in new_data_dict.items():
                final_row_data_dict[key] = value
            # write over the current row contents with it
            # with self.db_lock:
            self.db.delete(existing_row)
            self.db.commit()
        else:
            final_row_data_dict = new_data_dict
        # with self.db_lock:
        table.insert(**final_row_data_dict)
        self.db.commit()

    def store_metadata_row(self, metadata_dict):
        """Serialize *metadata_dict* and insert-or-update its metadata row."""
        # dict.has_key() is deprecated (gone in Python 3); 'in' works in both
        if "we_couldnt_parse_it" not in metadata_dict:
            metadata_dict["we_couldnt_parse_it"] = 0
        metadata_dict = self.prep_data_for_insertion(metadata_dict)
        self.insert_or_update_table_row(self.metadata_table, metadata_dict)

    def mark_img_as_not_downloaded(self, id, resolution):
        """Clear the downloaded flag for image *id* at *resolution*."""
        status_column_name = self.get_resolution_status_column_name(resolution)
        self.store_metadata_row({"id": id, status_column_name: False})

    def mark_img_as_downloaded(self, id, resolution):
        """Set the downloaded flag for image *id* at *resolution*."""
        status_column_name = self.get_resolution_status_column_name(resolution)
        self.store_metadata_row({"id": id, status_column_name: True})

    def mark_img_as_too_big(self, id, resolution):
        """Flag image *id* as too big to download at *resolution*."""
        status_column_name = self.get_resolution_too_big_column_name(resolution)
        self.store_metadata_row({"id": id, status_column_name: True})

    # DELETE EVERYTHING. CAREFUL!
    def truncate_all_tables(self):
        """Drop and recreate every table on this connection. Destructive!"""
        print("================================")
        print("LIKE SERIOUSLY I AM ABOUT TO DELETE ALL THE TABLES RIGHT NOW OH BOY")
        print(self.db_url)
        print("================================")
        # reflect the live schema off the engine, wipe it, rebuild it empty
        meta = MetaData(self.db.engine)
        meta.reflect()
        meta.drop_all()
        meta.create_all()

    ### HELPERS

    def get_field_key_by_full_name(self, full_name):
        """Reverse-lookup a field key from its human-readable full_name.

        Returns False when no field matches (fields without a full_name
        are never matched).
        """
        for key, data in self.their_fields.items():
            if not data["full_name"]:
                continue
            if data["full_name"] == full_name:
                return key
        return False

    ##### OTHER
    def repr_as_html(self, image_as_dict, image_resolution_to_local_file_location_fxn):
        """Render one image's metadata as an HTML page via a Django template.

        image_as_dict: a metadata row as returned by
            get_image_metadata_dict(); mutated in place with the extra
            template fields built below. Falsy input yields u"".
        image_resolution_to_local_file_location_fxn: callback mapping a
            resolution name to the local file location used in the page.
        """
        if not image_as_dict:
            return u""
        # NOTE(review): floorified and id_zfilled are computed but never
        # used below -- presumably leftovers; confirm before removing.
        floorified = usable_image_scraper.scraper.floorify(image_as_dict["id"])
        id_zfilled = str(image_as_dict["id"]).zfill(5)
        image_urls = {}
        for resolution in self.resolutions:
            image_urls[resolution] = image_resolution_to_local_file_location_fxn(resolution)

        # add link rel=license
        # image_as_dict['copyright'] = image_as_dict['copyright'].strip("'").replace('None', '<a href="http://creativecommons.org/licenses/publicdomain/" rel="license">None</a>')

        # prev/next links assume adjacent ids exist (no validity check here)
        image_as_dict["next_id"] = int(image_as_dict["id"]) + 1
        image_as_dict["prev_id"] = int(image_as_dict["id"]) - 1

        # build one <p class="datapoint"> block per populated scraped field
        image_as_dict["their_data"] = u""
        for key, data in self.their_fields.items():
            if not key in image_as_dict or not image_as_dict[key]:
                continue
            html_block = '<p class="datapoint">'
            # if there's a pre-perscribed way to represent this field:
            html_block += '<label for="' + key + '">' + self.their_fields[key]["full_name"] + ": </label>"
            rdfa_clause = ""
            if "dc_mapping" in data:
                rdfa_clause = ' property="' + data["dc_mapping"] + '"'
            if "repr_as_html" in data:
                html_block += data["repr_as_html"](image_as_dict[key])
            # if not:
            else:
                html_block += '<span id="' + key + '"' + rdfa_clause + ">" + unicode(image_as_dict[key]) + "</span>"
            html_block += "</p>"
            image_as_dict["their_data"] += html_block

        def get_template_str():
            # load django_template.html from this module's own directory
            template_file = "django_template.html"
            path = os.path.dirname(__file__)
            relpath = os.path.relpath(path)
            template_relpath = relpath + "/" + template_file
            fp = open(template_relpath, "r")
            template_as_str = fp.read()
            return template_as_str

        # the table of image downloads
        image_as_dict["download_links"] = u'<table id="download_links">'
        for resolution, data in self.scraper.resolutions.items():
            image_as_dict["download_links"] += u"<tr>"
            image_as_dict["download_links"] += u"<td>" + resolution + ":</td>"
            orig_url = self.get_resolution_url(resolution, image_as_dict["id"])
            # image_as_dict['download_links'] += u'<td><a href="' + orig_url + '">' + self.scraper.abbrev.upper() + '</a></td>'
            image_as_dict["download_links"] += u'<td><a href="' + orig_url + '">Original</a></td>'
            # if we've downloaded the image
            if self.get_resolution_status(image_as_dict["id"], resolution):
                our_url = self.scraper.get_web_resolution_local_image_location(
                    resolution, image_as_dict["id"], remote_url=orig_url
                )
                image_as_dict["download_links"] += u'<td><a href="' + our_url + '">Usable Image Mirror</a></td>'
            else:
                image_as_dict["download_links"] += u"<td></td>"
        image_as_dict["download_links"] += u"</table>"

        template_str = get_template_str()
        template = Template(template_str)
        context = Context({"image": image_as_dict, "image_urls": image_urls})
        html = template.render(context)
        return html