Example #1
    def __init__(self, autogetconfig=True):
        from gluon import current
        self.request = current.request
        self.ongae = self.request.env.web2py_runtime_gae
        self.T = current.T
        self.cache = current.cache
        from gluon import DAL
        if self.ongae:
            self._db = DAL("google:datastore")
        else:
            self._db = DAL("sqlite://config_movuca.sqlite")

        self.define_tables()
        if autogetconfig:
            self.get_config()
Example #2
 def send_heartbeat(self,counter):
     if not self.db_thread:
         logging.debug('thread building own DAL object')    
         self.db_thread = DAL(self.db._uri,folder = self.db._adapter.folder)
         self.define_tables(self.db_thread,migrate=False)
     try:
         db = self.db_thread
         sw, st = db.scheduler_worker, db.scheduler_task
         now = datetime.datetime.now()
         expiration = now-datetime.timedelta(seconds=self.heartbeat*3)    
         # record heartbeat
         logging.debug('........recording heartbeat')    
         if not db(sw.worker_name==self.worker_name)\
                 .update(last_heartbeat = now, status = ACTIVE):
             sw.insert(status = ACTIVE,worker_name = self.worker_name,
                       first_heartbeat = now,last_heartbeat = now)
         if counter % 10 == 0:
             # deallocate jobs assigned to inactive workers and requeue them
             logging.debug('    freeing workers that have not sent heartbeat')    
             inactive_workers = db(sw.last_heartbeat<expiration)
             db(st.assigned_worker_name.belongs(
                     inactive_workers._select(sw.worker_name)))\
                     (st.status.belongs((RUNNING,ASSIGNED,QUEUED)))\
                     .update(assigned_worker_name='',status=QUEUED)
             inactive_workers.delete()
         db.commit()
     except:
         db.rollback()
     time.sleep(self.heartbeat)
Example #3
def opendb():
    global db
    if (db == None):
        #		print "open database DAL"
        db = DAL('sqlite://storage.sqlite',
                 folder="%s/web2py/applications/gate/databases" % cwd)
        execfile("%s/web2py/applications/gate/models/db_gate.py" % cwd)
Example #4
 def get_db(self):
     """
         Return the connected db
     """
     if not os.path.exists(self.get_option('dbmetadata')):
         os.makedirs(self.get_option('dbmetadata'))
     db=DAL(self.get_option('database'),lazy_tables=True,folder=self.get_option("dbmetadata"))
     db.define_table('storages',
                 Field('storagename','string'),
                 Field('creation_ts','datetime',
                       default=datetime.datetime.now()),
                 Field('modified_ts','datetime',
                       default=datetime.datetime.now(),
                       update=datetime.datetime.now()),
                 )
     db.define_table('files',
                 Field('storages_id',db.storages),
                 Field('parent_id','reference files'),
                 Field('filename','string'),
                 Field('description','string'),
                 Field('mime','string'),
                 Field('ftype','string'),
                 Field('mode','integer'),
                 Field('inode','integer'),
                 Field('dev','string'),
                 Field('nlink','integer'),
                 Field('uid','integer'),
                 Field('gid','integer'),
                 Field('size','double'),
                 Field('ctime','datetime'),
                 Field('mtime','datetime'),
                 Field('atime','datetime'),
                 )
     return db
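
A minimal usage sketch for the schema above, assuming `backend` is an instance of the class that defines get_db() and that its get_option() calls return a valid connection string and metadata folder; the table and field names come straight from the define_table calls:

# Hypothetical usage; 'backend' stands for an instance of the class above.
db = backend.get_db()
storage_id = db.storages.insert(storagename='usb-drive-01')
db.files.insert(storages_id=storage_id,
                filename='report.pdf',
                mime='application/pdf',
                ftype='file',
                size=1024.0)
db.commit()
# list every file registered for that storage
for row in db(db.files.storages_id == storage_id).select(db.files.filename):
    print(row.filename)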
Example #5
    def copyDB(self):
        other_db = DAL("{0}://{1}".format(self.targetdbType,
                                          self.targetdbName),
                       folder=self.targetFolder)

        print 'creating tables...'

        for table in self.db:
            other_db.define_table(table._tablename,
                                  *[field for field in table])
            '''
            should there be an option to truncate the target DB?
            if yes, then change args to allow for choice
            and set self.truncate to the appropriate value

            if self.truncate==True:
                other_db[table._tablename].truncate()
            '''

        print 'exporting data...'
        self.db.export_to_csv_file(open('tmp.sql', 'wb'))

        print 'importing data...'
        other_db.import_from_csv_file(open('tmp.sql', 'rb'))
        other_db.commit()
        print 'done!'
        print 'Attention: do not run this program again or you will end up with duplicate records'
Example #6
    def post(self, strbools=[],
                   strints=[],
                   ):
        """
            Cleanup after migration

            @param strbools : List of tuples (tablename, fieldname) to convert from string/integer to bools
            @param strints : List of tuples (tablename, fieldname) to convert from string to integer
        """

        db = self.db

        # @ToDo: Do prepops of new tables

        # Restore data from backup
        folder = "%s/databases/backup" % current.request.folder
        db_bak = DAL("sqlite://backup.db",
                     folder=folder,
                     auto_import=True,
                     migrate=False)

        for tablename, fieldname in strints:
            newtable = db[tablename]
            newrows = db(newtable.id > 0).select(newtable.id)
            oldtable = db_bak[tablename]
            oldrows = db_bak(oldtable.id > 0).select(oldtable.id,
                                                     oldtable[fieldname])
            oldvals = oldrows.as_dict()
            for row in newrows:
                id = row.id
                val = oldvals[id][fieldname]
                if not val:
                    continue
                try:
                    vars = {fieldname : int(val)}
                except:
                    current.log.warning("S3Migrate: Unable to convert %s to an integer - skipping" % val)
                else:
                    db(newtable.id == id).update(**vars)

        for tablename, fieldname in strbools:
            to_bool = self.to_bool
            newtable = db[tablename]
            newrows = db(newtable.id > 0).select(newtable.id)
            oldtable = db_bak[tablename]
            oldrows = db_bak(oldtable.id > 0).select(oldtable.id,
                                                     oldtable[fieldname])
            oldvals = oldrows.as_dict()
            for row in newrows:
                id = row.id
                val = oldvals[id][fieldname]
                if not val:
                    continue
                val = to_bool(val)
                if val:
                    vars = {fieldname : val}
                    db(newtable.id == id).update(**vars)

        db.commit()
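
A hedged example of how this cleanup hook might be called, following the tuple format described in the docstring; the table and field names are placeholders, and `migration` stands for an instance of the class that defines post():

# Hypothetical invocation; table/field names are illustrative only.
migration.post(strbools=[('project_project', 'is_active')],
               strints=[('project_project', 'priority')])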
Example #7
    def __init__(self):

        request = current.request

        # Load s3cfg => but why do this in such a complicated way?
        #name = "applications.%s.modules.s3cfg" % request.application
        #s3cfg = __import__(name)
        #for item in name.split(".")[1:]:
        ## Remove the dot
        #s3cfg = getattr(s3cfg, item)
        #settings = s3cfg.S3Config()

        # Can use normal import here since executed in web2py environment:
        import s3cfg
        settings = s3cfg.S3Config()

        # Pass into template
        current.deployment_settings = settings

        # Read settings
        model = "%s/models/000_config.py" % request.folder
        code = getcfs(model, model, None)
        response = current.response

        # Needed as some Templates look at this & we don't wish to crash:
        response.s3 = Storage()

        # Global variables for 000_config.py
        environment = build_environment(request, response, current.session)
        environment["settings"] = settings
        # Some (older) 000_config.py also use "deployment_settings":
        environment["deployment_settings"] = settings
        # For backwards-compatibility with older 000_config.py:
        #def template_path():
        #    # When you see this warning, you should update 000_config.py
        #    # See: http://eden.sahanafoundation.org/wiki/DeveloperGuidelines/Templates/Migration#Changesin000_config.py
        #    print "template_path() is deprecated, please update 000_config.py"
        #    # Return just any valid path to make sure the path-check succeeds,
        #    # => modern S3Config will find the template itself
        #    return request.folder
        #environment["template_path"] = template_path
        environment["os"] = os
        environment["Storage"] = Storage

        # Execute 000_config.py
        restricted(code, environment, layer=model)

        self.db_engine = settings.get_database_type()
        (db_string, pool_size) = settings.get_database_string()

        # Get a handle to the database
        self.db = DAL(
            db_string,
            #folder="%s/databases" % request.folder,
            auto_import=True,
            # @ToDo: Set to False until we migrate
            migrate_enabled=True,
        )
Example #8
def append():
    db=DAL('sqlite://storage.sqlite', folder='/home/www-data/web2py/applications/tgmonitor/databases')
    db.define_table('tg_load',
        Field('check_date','datetime'),
        Field('tg_number','integer', notnull=True),
        Field('busy', 'integer'),
        Field('installed', 'integer')
        )

    db.tg_load.insert(check_date='',tg_number=2, busy=45, installed=60)
    db.commit()
Example #9
    def backup(self):
        """
            Backup the database to a local SQLite database

            @ToDo: Option to use a temporary DB in Postgres/MySQL as this takes
                   too long for a large DB
        """

        import os

        db = self.db
        folder = "%s/databases/backup" % current.request.folder

        # Create clean folder for the backup
        if os.path.exists(folder):
            import shutil
            shutil.rmtree(folder)
            import time
            time.sleep(1)
        os.mkdir(folder)

        # Setup backup database
        db_bak = DAL("sqlite://backup.db", folder=folder)

        # Copy Table structure
        for tablename in db.tables:
            if tablename == "gis_location":
                table = db[tablename]
                fields = [
                    table[field] for field in table.fields
                    if field != "the_geom"
                ]
                db_bak.define_table(tablename, *fields)
            else:
                db_bak.define_table(tablename, db[tablename])

        # Copy Data
        import csv
        csv.field_size_limit(2**20 * 100)  # 100 megs
        filename = "%s/data.csv" % folder
        file = open(filename, "w")
        db.export_to_csv_file(file)
        file.close()
        file = open(filename, "r")
        db_bak.import_from_csv_file(file, unique="uuid2")  # designed to fail
        file.close()
        db_bak.commit()

        # Pass handle back to other functions
        self.db_bak = db_bak
Example #10
def connect_db(init_schema=True):
    db_file = os.path.join(BASE_FOLDER, "sync.db")
    con = DAL('sqlite://' + db_file,
              pool_size=10,
              check_reserved=['all'],
              lazy_tables=lazy_tables,
              fake_migrate=fake_migrate,
              fake_migrate_all=fake_migrate_all,
              migrate=migrate)  # fake_migrate_all=True
    con.executesql('PRAGMA journal_mode=WAL')

    if init_schema is True:
        init_db_schema(con)

    return con
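
connect_db() relies on several module-level names that are not shown (BASE_FOLDER, lazy_tables, migrate, fake_migrate, fake_migrate_all and init_db_schema); a minimal sketch of what those definitions could look like, purely as an assumption:

import os
from gluon import DAL, Field  # DAL is used by connect_db() above, Field by the sketch below

# Assumed module-level configuration consumed by connect_db()
BASE_FOLDER = os.path.dirname(os.path.abspath(__file__))
lazy_tables = True
migrate = True
fake_migrate = False
fake_migrate_all = False

def init_db_schema(con):
    # hypothetical schema; the real table definitions will differ
    con.define_table('sync_item',
                     Field('name', 'string'),
                     Field('synced_on', 'datetime'))
    con.commit()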
Example #11
def adding_new_fields(new_unique_field,changed_table):
    """
    This function adds a new_unique_field into the changed_table, while keeping all the rest of
    the properties of the table unchanged
    """
    database_string = "sqlite://storage.db"
    old_database_folder = "%s/applications/%s/databases" % (WEB2PY_PATH, APP)
    temp_db = DAL( database_string, folder = old_database_folder, migrate_enabled=True ,migrate = True)
    new_field = Field(new_unique_field,"integer")
    try:
        changed_table_primary_key = db[changed_table]._primarykey
    except KeyError:
        changed_table_primary_key = None
    temp_db.define_table(changed_table ,db[changed_table],new_field,primarykey = changed_table_primary_key)
    return temp_db
Example #12
def get_ticket_storage(app):
    private_folder = apath('%s/private' % app, r=request)
    db_string = open(os.path.join(private_folder, 'ticket_storage.txt')).read().replace('\r','').replace('\n','').strip()
    tickets_table = 'web2py_ticket'
    tablename = tickets_table + '_' + app
    db_path = apath('%s/databases' % app, r=request)
    from gluon import DAL
    ticketsdb = DAL(db_string, folder=db_path, auto_import=True)
    if not ticketsdb.get(tablename):
        table = ticketsdb.define_table(
                tablename,
                Field('ticket_id', length=100),
                Field('ticket_data', 'text'),
                Field('created_datetime', 'datetime'),
                )
    return ticketsdb , ticketsdb.get(tablename)
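
A short usage sketch, assuming this runs in the web2py admin environment where request and apath are available; 'welcome' is just a placeholder application name:

# Hypothetical usage: open the ticket storage for an app and list tickets
ticketsdb, tickets = get_ticket_storage('welcome')
print('%d stored tickets' % ticketsdb(tickets.id > 0).count())
for row in ticketsdb(tickets.id > 0).select(tickets.ticket_id,
                                            tickets.created_datetime):
    print('%s  %s' % (row.ticket_id, row.created_datetime))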
Example #13
def get_migrated_db():
    """
    This function lets us view how the database looks after the
    migration scripts have been called; this lets us compare the two databases,
    the one before and the one after the migrations
    """
    os.chdir(WEB2PY_PATH)
    sys.path.append(WEB2PY_PATH)
    from gluon import DAL, Field
    database_string = "sqlite://storage.db"
    old_database_folder = "%s/applications/%s/databases" % (WEB2PY_PATH, APP)
    db = DAL(database_string,
             folder=old_database_folder,
             auto_import=True,
             migrate_enabled=True,
             migrate=True)
    return db
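
A hedged example of the comparison the docstring mentions, assuming a companion helper (called get_old_db() here, not shown) that opens the pre-migration copy of the database:

# Hypothetical comparison of table lists before and after the migrations
new_db = get_migrated_db()
old_db = get_old_db()   # assumed helper, not part of the example above
added = set(new_db.tables) - set(old_db.tables)
print('tables added by the migration: %s' % ', '.join(sorted(added)))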
Example #14
    def __init__(self):

        request = current.request

        # Load s3cfg
        name = "applications.%s.modules.s3cfg" % request.application
        s3cfg = __import__(name)
        for item in name.split(".")[1:]:
            # Remove the dot
            s3cfg = getattr(s3cfg, item)
        settings = s3cfg.S3Config()
        # Pass into template
        current.deployment_settings = settings

        # Read settings
        model = "%s/models/000_config.py" % request.folder
        code = getcfs(model, model, None)
        response = current.response
        response.s3 = Storage(
        )  # Needed as some Templates look at this & we don't wish to crash
        environment = build_environment(request, response, current.session)
        environment["settings"] = settings

        def template_path():
            " Return the path of the Template config.py to load "
            path = os.path.join(request.folder, "private", "templates",
                                settings.get_template(), "config.py")
            return path

        environment["template_path"] = template_path
        environment["os"] = os
        environment["Storage"] = Storage
        restricted(code, environment, layer=model)

        self.db_engine = settings.get_database_type()
        (db_string, pool_size) = settings.get_database_string()

        # Get a handle to the database
        self.db = DAL(
            db_string,
            #folder="%s/databases" % request.folder,
            auto_import=True,
            # @ToDo: Set to False until we migrate
            migrate_enabled=True,
        )
Example #15
    def __init__(self):
        self.db = DAL(
            current.config.get("cognito_db.uri"),
            pool_size=current.config.get("cognito_db.pool_size"),
            migrate_enabled=current.config.get("cognito_db.migrate"),
            check_reserved=["all"],
        )

        self.auth = Auth(db=self.db,
                         host_names=current.config.get("host.names"))

        self.auth.settings.create_user_groups = None

        # TODO: extend this during implementation
        self.auth.settings.extra_fields["auth_user"] = [
            Field("user_attributes", type="json")
        ]

        self.auth.define_tables(username=True, signature=True)
Example #16
def adding_renamed_fields(table_name,old_field_name,new_field_name,attributes_to_copy):
    """
    This function is used to add a field to the mentioned table while
    renaming a field. The renamed field is added separately to the table with the
    same properties as the original field.
    """
    database_string = "sqlite://storage.db"
    old_database_folder = "%s/applications/%s/databases" % (WEB2PY_PATH, APP)
    temp_db = DAL( database_string, folder = old_database_folder, migrate_enabled=True ,migrate = True)
    new_field = Field(new_field_name)
    try:
        table_primary_key = db[table_name]._primarykey
    except KeyError:
        table_primary_key = None
    for attribute in attributes_to_copy:
        exec_str = "new_field.%(attribute)s = db[table_name][old_field_name].%(attribute)s" % {"attribute":attribute}
        exec exec_str in globals() , locals()
    temp_db.define_table(table_name ,db[table_name],new_field,primarykey = table_primary_key)
    return temp_db
Example #17
    def check_status(user_id, log_name, task_id, scheduler, task_name, folder):
        import os
        log_path = os.path.join(folder, "logs", "tasks")
        from gluon import DAL, Field
        '''
        If we use current.db here instead of getting a
        new handle to the db, the task that we
        previously queued won't get inserted into the db
        so every call we make in this method to check
        on the task's status will always result in the task being in
        the 'QUEUED' state.
        '''
        db = DAL('sqlite://storage.db',
                 folder='applications/eden/databases',
                 auto_import=True)
        table = db.scheduler_task
        query = (table.id == task_id)
        task_status = None
        try:
            task_status = db(query).select(table.status).first().status
        except AttributeError:
            task_status = 'Unknown (task not yet in db)'
        '''
        This is the preferred way to check a task's status since
        it's using the web2py API, but we can't use this
        because the scheduler is pointing to
        current.db (see above comment):
        task_status = scheduler.task_status(task_id, output=True)
        print task_status.scheduler_task.status
        print task_status.result
        print task_status.scheduler_run.run_output
        '''

        if not os.path.exists(log_path):
            os.makedirs(log_path)

        with open(os.path.join(log_path, log_name), "a+") as log:
            log.write('<%s>: %s is currently in the %s state\n' %
                      (datetime.datetime.now(), task_name, task_status))
Example #18
 def instDB(self, storageFolder, storageConnectionString, autoImport):
     self.db = DAL(storageConnectionString,
                   folder=os.path.abspath(storageFolder),
                   auto_import=autoImport)
     return self.db
Example #19
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import sys, os, telnetlib, time, datetime


def get_circuits(s='', find_par=''):
    pos = s.find(find_par) + 35
    return s[pos:pos + 6].strip()


sys.path.append('/home/www-data/web2py')
from gluon import DAL, Field
db = DAL('sqlite://storage.sqlite',
         folder='/home/www-data/web2py/applications/tgmonitor/databases')
db.define_table('tg_load', Field('check_date', 'datetime'),
                Field('tg_number', length=17), Field('busy', 'integer'),
                Field('installed', 'integer'))

host = '10.200.66.70'
port = '6000'
tn = telnetlib.Telnet(host, port)
tn.write('LGI:op="monitor",PWD ="dspoftk",SER="10.100.100.104---O&M System";')
ans = tn.read_until('END')

tn.write('DSP OFTK: LT=TG, TG=44, DT=AT;')
ans = tn.read_until('END')

_busy = get_circuits(ans, 'Busy')
_ins_num = get_circuits(ans, 'Installation number')
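
The script presumably ends by writing the parsed counters into the tg_load table defined above (the same insert Example #8 performs); a hedged sketch of that final step:

# Hypothetical tail of the script: store the parsed circuit counters
db.tg_load.insert(check_date=datetime.datetime.now(),
                  tg_number='44',          # trunk group queried above
                  busy=int(_busy),
                  installed=int(_ins_num))
db.commit()
tn.close()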
Example #20
from gluon import DAL, Field

#db = DAL('sqlite://storage.sqlite')
db = DAL('postgres://*****:*****@db/postgres')
db.define_table('ipligence2', Field('ip_from', 'integer', 10, '0000000000'),
                Field('ip_to', 'integer', 10, '0000000000'),
                Field('country_code', 'string', 10),
                Field('country_name', 'string', 255),
                Field('continent_code', 'string', 10),
                Field('continent_name', 'string', 255),
                Field('time_zone', 'string', 10),
                Field('region_code', 'string', 10),
                Field('region_name', 'string', 255),
                Field('the_owner', 'string', 255),
                Field('city_name', 'string', 255),
                Field('county_name', 'string', 255),
                Field('latitude', 'double'), Field('longitude', 'double'))
db.ipligence2.import_from_csv_file(
    open(
        '/home/www-data/web2py/applications/TrackR/private/ipligence-max.mysqldump.sql',
        'rb'))
db.commit()
Example #21
import os
import re
import sys
import csv

# Assumption : script is placed inside scripts folder in application
scriptPath = os.path.dirname(os.path.realpath(__file__))
dbPath = os.path.abspath(os.path.join(scriptPath, "../databases"))
libraryPath = os.path.abspath(os.path.join(scriptPath, "../../.."))

sys.path.append(libraryPath)
from gluon import DAL

print dbPath, libraryPath

db = DAL('sqlite://storage.sqlite',
         folder=dbPath,
         auto_import=True,
         debug=True)


def readCsv(csvfile):
    with open(csvfile, 'rb') as f:
        reader = csv.reader(f)
        rownum = 0
        csvRecords = []
        for row in reader:
            csvRecord = {}
            if rownum == 0:
                header = row
            else:
                colnum = 0
                for col in row:
Example #22
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys

sys.path.insert(1, '/home/iotuser/gate/web2py')

from gluon import DAL, Field
from gluon.validators import IS_NOT_EMPTY, IS_EMAIL, IS_NOT_IN_DB, IS_INT_IN_RANGE

db = DAL('sqlite://storage.sqlite',
         folder='/home/iotuser/gate/web2py/applications/gate/databases')
execfile('/home/iotuser/gate/web2py/applications/gate/models/db_gate.py')

tn = db(db.trusted.id == db.neighborhood.neighbor_id).select().first()
print tn

print db.tables
print db().select(db.oui.ALL)
#print db().select(db.security.ALL)
Example #23
SLEEP_MINUTES = 5

errors_path = os.path.join(request.folder, 'errors')
try:
    db_string = open(os.path.join(request.folder, 'private',
                                  'ticket_storage.txt'),
                     encoding='UTF-8').read().replace('\r',
                                                      '').replace('\n',
                                                                  '').strip()
except:
    db_string = 'sqlite://storage.db'

db_path = os.path.join(request.folder, 'databases')

tk_db = DAL(db_string, folder=db_path, auto_import=True)
ts = TicketStorage(db=tk_db)
tk_table = ts._get_table(db=tk_db,
                         tablename=ts.tablename,
                         app=request.application)

hashes = {}

while 1:
    if request.tickets_db:
        print("Tickets are already being stored in the database")
        sys.exit(1)

    for file in os.listdir(errors_path):
        filename = os.path.join(errors_path, file)
Example #24
def main():
    """
    allows to run worker without python web2py.py .... by simply python this.py
    """
    parser = optparse.OptionParser()
    parser.add_option("-w",
                      "--worker_name",
                      dest="worker_name",
                      default=None,
                      help="start a worker with name")
    parser.add_option("-b",
                      "--heartbeat",
                      dest="heartbeat",
                      default=10,
                      type='int',
                      help="heartbeat time in seconds (default 10)")
    parser.add_option(
        "-L",
        "--logger_level",
        dest="logger_level",
        default=30,
        type='int',
        help=
        "set debug output level (0-100, 0 means all, 100 means none;default is 30)"
    )
    parser.add_option(
        "-E",
        "--empty-runs",
        dest="max_empty_runs",
        type='int',
        default=0,
        help="max loops with no grabbed tasks permitted (0 for never check)")
    parser.add_option(
        "-g",
        "--group_names",
        dest="group_names",
        default='main',
        help="comma separated list of groups to be picked by the worker")
    parser.add_option(
        "-f",
        "--db_folder",
        dest="db_folder",
        default='/Users/mdipierro/web2py/applications/scheduler/databases',
        help="location of the dal database folder")
    parser.add_option("-u",
                      "--db_uri",
                      dest="db_uri",
                      default='sqlite://storage.sqlite',
                      help="database URI string (web2py DAL syntax)")
    parser.add_option(
        "-t", "--tasks",dest="tasks",default=None,
        help="file containing task files, must define " + \
            "tasks = {'task_name':(lambda: 'output')} or similar set of tasks")
    (options, args) = parser.parse_args()
    if not options.tasks or not options.db_uri:
        print USAGE
    if options.tasks:
        path, filename = os.path.split(options.tasks)
        if filename.endswith('.py'):
            filename = filename[:-3]
        sys.path.append(path)
        print 'importing tasks...'
        tasks = __import__(filename, globals(), locals(), [], -1).tasks
        print 'tasks found: ' + ', '.join(tasks.keys())
    else:
        tasks = {}
    group_names = [x.strip() for x in options.group_names.split(',')]

    logging.getLogger().setLevel(options.logger_level)

    print 'groups for this worker: ' + ', '.join(group_names)
    print 'connecting to database in folder: ' + (options.db_folder or './')
    print 'using URI: ' + options.db_uri
    db = DAL(options.db_uri, folder=options.db_folder)
    print 'instantiating scheduler...'
    scheduler = Scheduler(db=db,
                          worker_name=options.worker_name,
                          tasks=tasks,
                          migrate=True,
                          group_names=group_names,
                          heartbeat=options.heartbeat,
                          max_empty_runs=options.max_empty_runs)
    print 'starting main worker loop...'
    scheduler.loop()
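
The --tasks option expects a Python file defining a tasks dict; a minimal sketch of such a file, following the format given in the option's help text:

# tasks.py -- hypothetical file passed via --tasks/-t
# maps task names to callables the scheduler worker can execute
def count_to(n=10):
    return ', '.join(str(i) for i in range(1, n + 1))

tasks = {
    'demo': (lambda: 'output'),
    'count_to': count_to,
}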
Example #25
    def backup(self):
        """
            Backup the database to a local SQLite database

            @ToDo: Option to use a temporary DB in Postgres/MySQL as this takes
                   too long for a large DB
        """

        moves = self.moves
        news = self.news
        strints = self.strints
        strbools = self.strbools
        if not moves and not news and not strbools and not strints:
            # Nothing to backup
            return

        import os

        db = self.db
        folder = "%s/databases/backup" % current.request.folder

        # Create clean folder for the backup
        if os.path.exists(folder):
            shutil.rmtree(folder)
            import time
            time.sleep(1)
        os.mkdir(folder)

        # Setup backup database
        db_bak = DAL("sqlite://backup.db", folder=folder, adapter_args={"foreign_keys": False})

        # Copy Table structure
        skip = []
        for tablename in db.tables:
            if tablename == "gis_location":
                table = db[tablename]
                fields = [table[field] for field in table.fields if field != "the_geom"]
                try:
                    db_bak.define_table(tablename, *fields)
                except KeyError:
                    # Can't resolve reference yet
                    # Cleanup
                    del db_bak[tablename]
                    # Try later
                    skip.append(tablename)
            else:
                try:
                    db_bak.define_table(tablename, db[tablename])
                except KeyError:
                    # Can't resolve reference yet
                    # Cleanup
                    del db_bak[tablename]
                    # Try later
                    skip.append(tablename)
        while skip:
            _skip = []
            for tablename in skip:
                if tablename == "gis_location":
                    table = db[tablename]
                    fields = [table[field] for field in table.fields if field != "the_geom"]
                    try:
                        db_bak.define_table(tablename, *fields)
                    except KeyError:
                        # Can't resolve reference yet
                        # Cleanup
                        del db_bak[tablename]
                        # Try later
                        _skip.append(tablename)
                    except:
                        import sys
                        print "Skipping %s: %s" % (tablename, sys.exc_info()[1])
                else:
                    try:
                        db_bak.define_table(tablename, db[tablename])
                    except KeyError:
                        # Can't resolve reference yet
                        # Cleanup
                        del db_bak[tablename]
                        # Try later
                        _skip.append(tablename)
                    except:
                        import sys
                        print "Skipping %s: %s" % (tablename, sys.exc_info()[1])
            skip = _skip

        # Which tables do we need to backup?
        tables = []
        if moves:
            for tablename in moves:
                tables.append(tablename)
        if news:
            for tablename in news:
                new = news[tablename]
                for t in new["tables"]:
                    tables.append(t)
                for s in new["supers"]:
                    tables.append(s)
                    stable = db[s]
                    rows = db(stable._id > 0).select(stable.instance_type)
                    instance_types = set([r.instance_type for r in rows])
                    for t in instance_types:
                        tables.append(t)
        if strbools:
            for tablename, fieldname in strbools:
                tables.append(tablename)
        if strints:
            for tablename, fieldname in strints:
                tables.append(tablename)

        # Remove duplicates
        tables = set(tables)

        # Copy Data
        import csv
        csv.field_size_limit(2**20 * 100)  # 100 megs
        for tablename in tables:
            filename = "%s/%s.csv" % (folder, tablename)
            file = open(filename, "w")
            rows = db(db[tablename].id > 0).select()
            rows.export_to_csv_file(file)
            file.close()
            file = open(filename, "r")
            db_bak[tablename].import_from_csv_file(file, unique="uuid2") # uuid2 designed to not hit!
            file.close()
            db_bak.commit()

        # Pass handle back to other functions
        self.db_bak = db_bak
Example #26
    def send_heartbeat(self, counter):
        if not self.db_thread:
            logger.debug('thread building own DAL object')
            self.db_thread = DAL(self.db._uri, folder=self.db._adapter.folder)
            self.define_tables(self.db_thread, migrate=False)
        try:
            db = self.db_thread
            sw, st = db.scheduler_worker, db.scheduler_task
            now = self.now()
            # record heartbeat
            mybackedstatus = db(
                sw.worker_name == self.worker_name).select().first()
            if not mybackedstatus:
                sw.insert(status=ACTIVE,
                          worker_name=self.worker_name,
                          first_heartbeat=now,
                          last_heartbeat=now,
                          group_names=self.group_names)
                self.worker_status = [ACTIVE, 1]  # activating the process
                mybackedstatus = ACTIVE
            else:
                mybackedstatus = mybackedstatus.status
                if mybackedstatus == DISABLED:
                    # keep sleeping
                    self.worker_status[0] = DISABLED
                    if self.worker_status[1] == MAXHIBERNATION:
                        logger.debug('........recording heartbeat (%s)',
                                     self.worker_status[0])
                        db(sw.worker_name == self.worker_name).update(
                            last_heartbeat=now)
                elif mybackedstatus == TERMINATE:
                    self.worker_status[0] = TERMINATE
                    logger.debug("Waiting to terminate the current task")
                    self.give_up()
                    return
                elif mybackedstatus == KILL:
                    self.worker_status[0] = KILL
                    self.die()
                else:
                    if mybackedstatus == STOP_TASK:
                        logger.info('Asked to kill the current task')
                        self.terminate_process()
                    logger.debug('........recording heartbeat (%s)',
                                 self.worker_status[0])
                    db(sw.worker_name == self.worker_name).update(
                        last_heartbeat=now, status=ACTIVE)
                    self.worker_status[1] = 1  # re-activating the process
                    if self.worker_status[0] != RUNNING:
                        self.worker_status[0] = ACTIVE

            self.do_assign_tasks = False
            if counter % 5 == 0 or mybackedstatus == PICK:
                try:
                    # delete inactive workers
                    expiration = now - datetime.timedelta(
                        seconds=self.heartbeat * 3)
                    departure = now - datetime.timedelta(
                        seconds=self.heartbeat * 3 * MAXHIBERNATION)
                    logger.debug(
                        '    freeing workers that have not sent heartbeat')
                    inactive_workers = db(((sw.last_heartbeat < expiration)
                                           & (sw.status == ACTIVE))
                                          | ((sw.last_heartbeat < departure)
                                             & (sw.status != ACTIVE)))
                    db(st.assigned_worker_name.belongs(
                        inactive_workers._select(sw.worker_name)))(st.status == RUNNING)\
                        .update(assigned_worker_name='', status=QUEUED)
                    inactive_workers.delete()
                    try:
                        self.is_a_ticker = self.being_a_ticker()
                    except:
                        logger.error('Error coordinating TICKER')
                    if self.worker_status[0] == ACTIVE:
                        self.do_assign_tasks = True
                except:
                    logger.error('Error cleaning up')
            db.commit()
        except:
            logger.error('Error retrieving status')
            db.rollback()
        self.adj_hibernation()
        self.sleep()
Example #27
    def send_heartbeat(self, counter):
        if not self.db_thread:
            logging.debug('thread building own DAL object')
            self.db_thread = DAL(self.db._uri, folder=self.db._adapter.folder)
            self.define_tables(self.db_thread, migrate=False)
        try:
            db = self.db_thread
            sw, st = db.scheduler_worker, db.scheduler_task
            now = self.now()
            expiration = now - datetime.timedelta(seconds=self.heartbeat * 3)
            departure = now - datetime.timedelta(seconds=self.heartbeat * 3 *
                                                 MAXHIBERNATION)
            # record heartbeat
            mybackedstatus = db(
                sw.worker_name == self.worker_name).select().first()
            if not mybackedstatus:
                sw.insert(status=ACTIVE,
                          worker_name=self.worker_name,
                          first_heartbeat=now,
                          last_heartbeat=now,
                          group_names=self.group_names)
                self.worker_status = ACTIVE, 1  #activating the process
            else:
                if mybackedstatus.status == DISABLED:
                    self.worker_status = DISABLED, self.worker_status[
                        1]  #keep sleeping
                    if self.worker_status[1] == MAXHIBERNATION:
                        logging.debug('........recording heartbeat')
                        db(sw.worker_name == self.worker_name).update(
                            last_heartbeat=now)

                elif mybackedstatus.status == TERMINATE:
                    self.worker_status = TERMINATE, self.worker_status[1]
                    logging.debug("Waiting to terminate the current task")
                    self.give_up()
                    return
                elif mybackedstatus.status == KILL:
                    self.worker_status = KILL, self.worker_status[1]
                    self.die()

                else:
                    logging.debug('........recording heartbeat')
                    db(sw.worker_name == self.worker_name).update(
                        last_heartbeat=now, status=ACTIVE)
                    self.worker_status = ACTIVE, 1  #re-activating the process

            self.do_assign_tasks = False
            if counter % 5 == 0:
                try:
                    # delete inactive workers
                    logging.debug(
                        '    freeing workers that have not sent heartbeat')
                    inactive_workers = db(((sw.last_heartbeat < expiration)
                                           & (sw.status == ACTIVE))
                                          | ((sw.last_heartbeat < departure)
                                             & (sw.status != ACTIVE)))
                    db(st.assigned_worker_name.belongs(
                        inactive_workers._select(sw.worker_name)))\
                        (st.status == RUNNING)\
                        .update(assigned_worker_name='',status=QUEUED)
                    inactive_workers.delete()
                    self.is_a_ticker = self.being_a_ticker()
                    if self.worker_status[0] == ACTIVE:
                        self.do_assign_tasks = True
                except:
                    pass
            db.commit()
        except:
            db.rollback()
        self.adj_hibernation()
        self.sleep()
Example #28
# Copyright 2011 - Thomas Bellembois [email protected]
# Cecill licence, see LICENSE
# $Id: chimitheque_ide_autocomplete.py 194 2015-02-23 16:27:16Z tbellemb $
# -*- coding: utf-8 -*-
if False:
    #
    # never imported - just for IDE autocompletion
    #
    from gluon import DAL
    db = DAL()
    from gluon import settings
    from gluon.cache import Cache
    from gluon.dal import Field
    from gluon.globals import Request
    request = Request()
    from gluon.globals import Response
    response = Response()
    from gluon.globals import Session
    session = Session()
    from gluon.html import *
    from gluon.http import HTTP
    from gluon.http import redirect
    from gluon.languages import translator
    T = translator(request)
    from gluon.sqlhtml import SQLFORM
    from gluon.tools import Auth
    auth = Auth()
    from gluon.tools import Crud, Mail
    from gluon.validators import *
    import sys
    mail = Mail()
Example #29
from datetime import datetime, timedelta
from multiprocessing.util import Finalize
from time import time
from anyjson import deserialize, serialize
from celery import schedules
from celery.beat import Scheduler, ScheduleEntry
from celery.utils.timeutils import timedelta_seconds
from celeryconfig import CELERY_RESULT_DBURI

import sys, os, logging
sys.path.append(os.environ['WEB2PY_PATH'])

from gluon import DAL
folder, uri = os.path.split(CELERY_RESULT_DBURI.split(':///')[1])
db = DAL(CELERY_RESULT_DBURI.split(':///')[0] + '://' + uri,
         folder=folder,
         migrate_enabled=False,
         auto_import=True)
print 'I found these tables: ' + ', '.join(db.tables())
if db(db.celery_periodictasks).count() > 0:
    logging.error('found too many db.celery_periodictasks, deleting them all')
    db(db.celery_periodictasks).delete()
if db(db.celery_periodictasks).count() < 1:
    logging.error('found no db.celery_periodictasks, making a singleton')
    db.celery_periodictasks(last_update=datetime.now())


def get_or_make_unique(table, **fields):
    query = reduce(lambda a, b: a & b,
                   [table[key] == value for key, value in fields.items()])
    rows = table._db(query).select(limitby=(0, 2))
    if len(rows) > 1:
Example #30
    def post(self, moves=None,
                   news=None,
                   strbools=None,
                   strints=None,
                   ):
        """
            Cleanup after migration

            @param moves     : List of dicts {tablename: [(fieldname, new_tablename, link_fieldname)]} to move a field from 1 table to another
                              - fieldname can be a tuple if the fieldname changes: (fieldname, new_fieldname)
            @param news      : List of dicts {new_tablename: {'lookup_field': '',
                                                              'tables': [tablename: [fieldname]],
                                                              'supers': [tablename: [fieldname]],
                                                              } to create new records from 1 or more old tables (inc all instances of an SE)
                              - fieldname can be a tuple if the fieldname changes: (fieldname, new_fieldname)
            @param strbools : List of tuples [(tablename, fieldname)] to convert from string/integer to bools
            @param strints  : List of tuples [(tablename, fieldname)] to convert from string to integer
        """

        db = self.db

        # @ToDo: Do prepops of new tables

        # Restore data from backup
        folder = "%s/databases/backup" % current.request.folder
        db_bak = DAL("sqlite://backup.db",
                     folder=folder,
                     auto_import=True,
                     migrate=False)

        if moves:
            for tablename in moves:
                table = db_bak[tablename]
                fieldname, new_tablename, link_fieldname = moves[tablename]
                if isinstance(fieldname, (tuple, list)):
                    fieldname, new_fieldname = fieldname
                else:
                    new_fieldname = fieldname
                old_field = table[fieldname]
                new_linkfield = db[new_tablename][link_fieldname]
                rows = db_bak(table._id > 0).select(old_field, link_fieldname)
                for row in rows:
                    update_vars = {}
                    update_vars[new_fieldname] = row[old_field]
                    db(new_linkfield == row[link_fieldname]).update(**update_vars)

        if news:
            for tablename in news:
                # Read Data
                data = {}
                new = news[tablename]
                lookup_field = new["lookup_field"]
                _tables = new["tables"]
                for t in _tables:
                    fields = _tables[t]
                    # @ToDo: Support tuples
                    #for f in fields:
                    #    if isinstance(f, (tuple, list)):
                    table = db_bak[t]
                    table_fields = [table[f] for f in fields]
                    rows = db_bak(table.deleted == False).select(table[lookup_field],
                                                                 *table_fields)
                    for row in rows:
                        record_id = row[lookup_field]
                        if record_id in data:
                            _new = False
                            _data = data[record_id]
                        else:
                            _new = True
                            _data = {}
                        for f in fields:
                            if f in row:
                                if row[f] not in ("", None):
                                    # JSON type doesn't like ""
                                    _data[f] = row[f]
                        if _new:
                            data[record_id] = _data

                for s in new["supers"]:
                    fields = new["supers"][s]
                    # @ToDo: Support tuples
                    #for f in fields:
                    #    if isinstance(f, (tuple, list)):
                    stable = db_bak[s]
                    superkey = stable._id.name
                    rows = db_bak(stable.deleted == False).select(stable._id,
                                                                  stable.instance_type)
                    for row in rows:
                        etable = db_bak[row["instance_type"]]
                        _fields = [f for f in fields if f in etable.fields]
                        table_fields = [etable[f] for f in _fields]
                        record = db_bak(etable[superkey] == row[superkey]).select(etable[lookup_field],
                                                                                  *table_fields
                                                                                  ).first()
                        if record:
                            record_id = record[lookup_field]
                            if record_id in data:
                                _new = False
                                _data = data[record_id]
                            else:
                                _new = True
                                _data = {}
                            for f in _fields:
                                if f in record:
                                    if record[f] not in ("", None):
                                        # JSON type doesn't like ""
                                        _data[f] = record[f]
                            if _new:
                                data[record_id] = _data

                # Create Records
                table = db[tablename]
                for record_id in data:
                    update_vars = data[record_id]
                    if update_vars:
                        update_vars[lookup_field] = record_id
                        # Can't rely on the defaults as auto_import doesn't see DAL defaults
                        update_vars["created_on"] = datetime.datetime.utcnow()
                        update_vars["deleted"] = False
                        update_vars["mci"] = 0
                        update_vars["modified_on"] = datetime.datetime.utcnow()
                        update_vars["uuid"] = uuid4().urn # Would always be identical otherwise
                        table.insert(**update_vars)

        if strints:
            for tablename, fieldname in strints:
                newtable = db[tablename]
                newrows = db(newtable.id > 0).select(newtable.id)
                oldtable = db_bak[tablename]
                oldrows = db_bak(oldtable.id > 0).select(oldtable.id,
                                                         oldtable[fieldname])
                oldvals = oldrows.as_dict()
                for row in newrows:
                    _id = row.id
                    val = oldvals[_id][fieldname]
                    if not val:
                        continue
                    try:
                        update_vars = {fieldname : int(val)}
                    except:
                        current.log.warning("S3Migrate: Unable to convert %s to an integer - skipping" % val)
                    else:
                        db(newtable.id == _id).update(**update_vars)

        if strbools:
            for tablename, fieldname in strbools:
                to_bool = self.to_bool
                newtable = db[tablename]
                newrows = db(newtable.id > 0).select(newtable.id)
                oldtable = db_bak[tablename]
                oldrows = db_bak(oldtable.id > 0).select(oldtable.id,
                                                         oldtable[fieldname])
                oldvals = oldrows.as_dict()
                for row in newrows:
                    _id = row.id
                    val = oldvals[_id][fieldname]
                    if not val:
                        continue
                    val = to_bool(val)
                    if val:
                        update_vars = {fieldname : val}
                        db(newtable.id == _id).update(**update_vars)

        db.commit()
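
A hedged sketch of the argument shapes this post() expects, matching how the code above unpacks them (note that moves[tablename] is unpacked as a single 3-tuple); every table and field name below is a placeholder, and `migration` stands for an instance of the defining class:

# Hypothetical invocation; names are illustrative only.
migration.post(
    moves={'org_office': ('phone', 'org_contact', 'office_id')},
    news={'org_contact': {'lookup_field': 'organisation_id',
                          'tables': {'org_office': ['email']},
                          'supers': {'org_site': ['address']},
                          }},
    strbools=[('project_project', 'is_active')],
    strints=[('project_project', 'priority')],
)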