Example #1
 def db_from_uri(uri: str, name: str, databases: dict):
     factory, params = resolve_uri(uri)
     params['database_name'] = name
     storage = factory()
     return DB(storage, databases=databases, **params)
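The helper above follows the zodburi pattern (resolve_uri returning a storage factory plus DB keyword arguments) and wires each database into a shared multi-database dict. A minimal usage sketch, assuming resolve_uri comes from zodburi and that the URIs and database names below are purely illustrative:

    from zodburi import resolve_uri  # assumed source of resolve_uri

    databases = {}
    main = db_from_uri('file:///tmp/main.fs', 'main', databases)
    catalog = db_from_uri('file:///tmp/catalog.fs', 'catalog', databases)

    # both DB objects share the `databases` dict, so a connection to one
    # can reach the other by name
    conn = main.open()
    catalog_conn = conn.get_connection('catalog')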
Example #2
dbtype = sys.argv[2]

if dbtype.startswith('f'):
    storage = FileStorage('Data.fs')
else:
    clientid = sys.argv[3]
    addr = ('localhost', 1234)
    #storage = ClientStorage.ClientStorage(addr,cache_size=2048*1024*1024,client='shm/p_gclient' +clientid)
    storage = ClientStorage.ClientStorage(addr,
                                          cache_size=512 * 1024 * 1024,
                                          client='querykmcache' + clientid)
    #storage = ClientStorage.ClientStorage(addr,cache_size=0)
    #storage = ClientStorage.ClientStorage(addr)

#db = DB(storage,cache_size=1000000,cache_size_bytes=1024*1024*124)
db = DB(storage, cache_size=200000,
        cache_size_bytes=1024 * 1024 * 124)  #this uses 0.03 secs
#db = DB(storage)
connection = db.open()
root = connection.root()
g = root['graphdb']

fixedtarget = None
maxnum = 1000


# given a topic, query all authors that like that topic (10 points) or have written an
# article on it (3 points) or have worked on a project on it (5 points)
def addpoints(found, name, points):
    found[name] = found.setdefault(name, 0) + points
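The original example stops at this helper; a hypothetical continuation of the query described in the comment above (the attributes of g are assumed for illustration, they are not shown in the snippet):

    def authors_for_topic(g, topic):
        found = {}
        # g.likes, g.articles and g.projects are assumed to map a topic to an
        # iterable of author names; adapt them to the real graph API
        for author in g.likes.get(topic, ()):
            addpoints(found, author, 10)   # author likes the topic
        for author in g.articles.get(topic, ()):
            addpoints(found, author, 3)    # author wrote an article on it
        for author in g.projects.get(topic, ()):
            addpoints(found, author, 5)    # author worked on a project on it
        # highest score first, capped at maxnum as defined above
        return sorted(found.items(), key=lambda kv: kv[1], reverse=True)[:maxnum]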

Example #3
def db_from_uri(uri, resolve_uri=resolve_uri):
    storage_factory, dbkw = resolve_uri(uri)
    storage = storage_factory()
    return DB(storage, **dbkw)
Example #4
 def getdb(self):
     from ZODB import DB
     from ZODB.FileStorage import FileStorage
     self._file_name = storage_filename = os.path.join(self.dir, 'Data.fs')
     storage = FileStorage(storage_filename)
     self.db = DB(storage)
Example #5
 def __init__(self, archivo):
     self.storage = FileStorage.FileStorage(archivo)
     self.db = DB(self.storage)
     self.conexion = self.db.open()
     self.raiz = self.conexion.root()
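A minimal usage sketch for the wrapper above (the class name is not shown in the snippet, so ZodbWrapper is assumed here):

    import transaction

    wrapper = ZodbWrapper('Data.fs')  # hypothetical name for the class in Example #5
    wrapper.raiz['saludo'] = 'hola'   # any pickleable object can be stored under the root
    transaction.commit()              # persist the change
    wrapper.conexion.close()
    wrapper.db.close()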
Example #6
    def __init__(self):

        directory = get_directory()

        self.logger = logging.getLogger('ZODB.FileStorage')
        fh = logging.FileHandler(directory + 'db.log')
        self.logger.addHandler(fh)

        self.storage = FileStorage.FileStorage(directory + 'db.fs')
        self.db = DB(self.storage)
        self.connection = self.db.open()

        dbroot = self.connection.root()

        if not dbroot.has_key('job_key'):
            from BTrees.OOBTree import OOBTree
            dbroot['job_key'] = OOBTree()
            dbroot['job_key']['val'] = 0

        self.job_key = dbroot['job_key']

        # Ensure that a 'job_db' key is present
        # in the root
        if not dbroot.has_key('job_db'):
            from BTrees.OOBTree import OOBTree
            dbroot['job_db'] = OOBTree()

        self.job_db = dbroot['job_db']

        if not dbroot.has_key('user_db'):
            from BTrees.OOBTree import OOBTree
            dbroot['user_db'] = OOBTree()
            self.user_db = dbroot['user_db']
            self.user_db['user'] = User('unknown', 'unknown', 'unknown')

        self.user_db = dbroot['user_db']

        if not dbroot.has_key('site_db'):
            from BTrees.OOBTree import OOBTree
            dbroot['site_db'] = OOBTree()
            self.site_db = dbroot['site_db']

        self.site_db = dbroot['site_db']

        if scheduler is not None:
            self.site_db[scheduler.name()] = Site(scheduler.name(),
                                                  scheduler.scheduler_type())

        if not dbroot.has_key('queue_db'):
            from BTrees.OOBTree import OOBTree
            dbroot['queue_db'] = OOBTree()
            self.queue_db = dbroot['queue_db']

        self.queue_db = dbroot['queue_db']

        from version import get_git_version
        if not dbroot.has_key('version'):
            dbroot['version'] = get_git_version()
        else:
            current_version = dbroot['version']
            new_version = get_git_version()
            # Add any migrations required here
            if current_version != new_version:
                pass

            dbroot['version'] = new_version

        if not dbroot.has_key('remote_site_db'):
            from BTrees.OOBTree import OOBTree
            dbroot['remote_site_db'] = OOBTree()
            self.remote_site_db = dbroot['remote_site_db']

        self.remote_site_db = dbroot['remote_site_db']
Example #7
 def __init__(self, file_storage):
     self.storage = FileStorage.FileStorage(file_storage)
     self.compressed_storage = zc.zlibstorage.ZlibStorage(self.storage)
     self.db = DB(self.compressed_storage)
     self.connection = self.db.open()
     self.db_root = self.connection.root()
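zc.zlibstorage.ZlibStorage wraps any underlying storage, not only FileStorage; a minimal variant of the example above, assuming a ZEO server is reachable at the (illustrative) address:

    import zc.zlibstorage
    from ZEO import ClientStorage
    from ZODB import DB

    # records are transparently compressed/decompressed before they reach the server
    raw = ClientStorage.ClientStorage(('localhost', 8100))
    db = DB(zc.zlibstorage.ZlibStorage(raw))
    connection = db.open()
    root = connection.root()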
Example #8
    def db(self):
        if self.__db is None:
            self.__db = DB(self.storage)

        return self.__db
Example #9
import tarfile
import xml.parsers.expat
import xml.dom.minidom
from urllib import url2pathname
from ZODB.DemoStorage import DemoStorage
from ZODB import DB
from OFS.XMLExportImport import importXML

if int(os.environ.get('erp5_report_new_simulation_failures') or 0):
  newSimulationExpectedFailure = lambda test: test
else:
  from unittest import expectedFailure as newSimulationExpectedFailure

# Keep a global reference to a ZODB storage so that we can import business
# template xml files. XXX this connection will remain open.
db = DB(DemoStorage())
connection = db.open()


class BusinessTemplateInfoBase:

  def __init__(self, target):
    self.target = target
    self.setUp()

  def setUp(self):
    self.title = ''
    self.modules = {}
    self.allowed_content_types = {}
    self.actions = {}
Example #10
def _gen_testdb(outfs_path, zext):
    xtime_reset()

    ext = ext4subj
    if not zext:
        def ext(subj): return {}

    logging.basicConfig()

    # generate random changes to objects hooked to top-level root by a/b/c/... key
    random.seed(0)

    namev = [_ for _ in "abcdefg"]
    Niter = 2
    for i in range(Niter):
        stor = FileStorage(outfs_path, create=(i == 0))
        db   = DB(stor)
        conn = db.open()
        root = conn.root()
        assert root._p_oid == p64(0), repr(root._p_oid)

        for j in range(25):
            name = random.choice(namev)
            if name in root:
                obj = root[name]
            else:
                root[name] = obj = Object(None)

            obj.value = "%s%i.%i" % (name, i, j)

            commit(u"user%i.%i" % (i,j), u"step %i.%i" % (i, j), ext(name))

        # undo a transaction one step before a latest one a couple of times
        for j in range(2):
            # XXX undoLog, despite what its interface says:
            #   https://github.com/zopefoundation/ZODB/blob/2490ae09/src/ZODB/interfaces.py#L472
            # just returns log of all transactions in specified range:
            #   https://github.com/zopefoundation/ZODB/blob/2490ae09/src/ZODB/FileStorage/FileStorage.py#L1008
            #   https://github.com/zopefoundation/ZODB/blob/2490ae09/src/ZODB/FileStorage/FileStorage.py#L2103
            # so we retry undoing next log's txn on conflict.
            for ul in db.undoLog(1, 20):
                try:
                    db.undo(ul["id"])
                    commit(u"root%i.%i\nYour\nMagesty " % (i, j),
                           u"undo %i.%i\nmore detailed description\n\nzzz ..." % (i, j) + "\t"*(i+j),
                           ext("undo %s" % ul["id"]))
                except UndoError:
                    transaction.abort()
                    continue

                break

        # delete an object
        name = random.choice(list(root.keys()))
        obj = root[name]
        root[name] = Object("%s%i*" % (name, i))
        # NOTE user/ext are kept empty on purpose - to also test this case
        commit(u"", u"predelete %s" % unpack64(obj._p_oid), {})

        # XXX obj in db could be changed by above undo, but ZODB does not automatically
        # propagate undo changes to live objects - so obj._p_serial can be stale.
        # Get serial via history.
        obj_tid_lastchange = db.history(obj._p_oid)[0]['tid']

        txn = precommit(u"root%i\nYour\nRoyal\nMagesty' " % i +
                            ''.join(chr(_) for _ in range(32)),     # <- NOTE all control characters
                        u"delete %i\nalpha beta gamma'delta\"lambda\n\nqqq ..." % i,
                        ext("delete %s" % unpack64(obj._p_oid)))
        # at low level stor requires ZODB.IStorageTransactionMetaData not txn (ITransaction)
        txn_stormeta = TransactionMetaData(txn.user, txn.description, txn.extension)
        stor.tpc_begin(txn_stormeta)
        stor.deleteObject(obj._p_oid, obj_tid_lastchange, txn_stormeta)
        stor.tpc_vote(txn_stormeta)
        # TODO different txn status vvv
        # XXX vvv it does the thing, but py fs iterator treats this txn as EOF
        #if i != Niter-1:
        #    stor.tpc_finish(txn_stormeta)
        stor.tpc_finish(txn_stormeta)

        # close db & rest not to get conflict errors after we touched stor
        # directly a bit. everything will be reopened on next iteration.
        conn.close()
        db.close()
        stor.close()
Example #11
def mech_design_get(idxMech=None):
    actions = list()
    user = cherrypy.session['user']
    mechDesign = cherrypy.session['mechDesign']
    equation = cherrypy.session['equation']
    solution = equation.solution_get('S')
    ds = Geometric.DraftSpace(backend='Agg')
    ds.clear()
    ds.showGrid = False
    ds.showTick = False
    ds.showAnchor = False
    mechPath = os.path.join(get_server_setting('imagesdir'),
                            cherrypy.session.id)
    ds.path = mechPath
    ds.authorOrigin = equation.instance.owner.firstName + ' ' + equation.instance.owner.lastName
    ds.authorModify = user.firstName + ' ' + user.lastName
    ds.dateOrigin = equation.instance.timestamp.strftime("%m-%d-%Y")
    ds.title = equation.instance.title
    if not os.path.exists(mechPath):
        os.mkdir(mechPath)
    idxList = list()
    if idxMech is not None:
        idxList.append(idxMech)
    else:
        for drawing in mechDesign.drawings:
            idxList.append(drawing.id)
    storage = FileStorage.FileStorage(
        os.path.join(get_server_setting('cachedir'), 'cache.fs'))
    db = DB(storage)
    connection = db.open()
    root = connection.root()
    for idxMech in idxList:
        for drawing in mechDesign.drawings:  #search for drawing
            if drawing.id == idxMech:
                break
        mechOptions = cherrypy.session['mechOptions'][idxMech]
        ds.ratio = mechOptions['ratio']
        ds.perspective = drawing.perspective.capitalize()
        ds.name = 'Mechanical-Drawing' + drawing.perspective.capitalize(
        ) + time.strftime('%y%m%d%H%M%S')
        imgUrl = 'api/image?app=Mechanical&tab=Drawing&id1=%d&id2=%d' % (
            mechDesign.id, drawing.id) + '&image=' + ds.name + '.' + DRAWMODE
        values = dict()
        symbols = dict()
        baseUnit = equation.unumDict[int(mechOptions['unit'])]
        for variableSol in solution.variables:
            for variableMech in drawing.variables:
                desig = variableMech.desig.longhand + str(variableMech.number)
                if variableMech.id == variableSol.variable.id:
                    symbols[desig] = variableMech.desig.latex.replace(
                        '#', str(variableMech.number))
                    if variableSol.variable.desig.name == 'Length':
                        unumSelected = equation.variable_get_unit(
                            variableSol, 'selected-unum')
                        unumValue = variableSol.value * unumSelected
                        unumValue = unumValue.asUnit(baseUnit)
                        value = unumValue._value
                    else:
                        value = variableSol.value

                    values[desig] = value

        key = '%s\n%s\n%s\n%s\n%s' % (
            mechOptions['size'], mechOptions['checkOptions'],
            mechOptions['ratio'], mechOptions['unit'],
            ds.get_cmd(drawing.drawing, values))
        cache = root.cache_image.get(key, None)
        imgFileLoc = os.path.join(ds.path, ds.name + '.' + DRAWMODE)
        if cache:
            with open(imgFileLoc, 'wb+') as fId:  # ensure the image file handle is closed
                fId.write(cache.image)
            cache.update()  # update timestamp
            root.cache_image[key] = cache
            transaction.commit()
        else:
            cherrypy.log.error('Draw %s, %s' % (drawing, values), 'MECHANICAL',
                               logging.DEBUG)
            ds.load(drawing.drawing, values)
            cherrypy.log.error('Set background.', 'MECHANICAL', logging.DEBUG)
            ds.draw_background(mechOptions['size'])
            ds.save(fmt=DRAWMODE)
            root.cache_image[key] = ModelCore.CacheImage(imgFileLoc)
            transaction.commit()
        ds.clear()
        actions.append(
            new_action('actDrawingAdd',
                       'Drawing',
                       idx=idxMech,
                       image=imgUrl,
                       variables=None,
                       code=drawing.drawing,
                       perspective=drawing.perspective))
        actions.append(
            new_action('actRatioChange',
                       'Drawing',
                       idx=idxMech,
                       ratio=mechOptions['ratio']))
        actions.append(
            new_action('actSizeFill',
                       'Drawing',
                       idx=idxMech,
                       options=Geometric.options,
                       value=mechOptions['size']))
        actions.append(
            new_action('actUnitFill',
                       'Drawing',
                       idx=idxMech,
                       options={
                           100000: 'm',
                           302800: 'ft',
                           300103: 'cm',
                           302700: 'in',
                           300102: 'mm',
                           302900: 'mil'
                       },
                       value=int(mechOptions['unit'])))
    connection.close()  #close cache
    db.close()
    storage.close()
    return {'actions': actions}
Example #12
def render_circuitikz(latex, dest = None, equation = None, display = None):
    actions = list()
    if dest:
        filePath, fileName = os.path.split(dest)
        imageSessPath = filePath
        imagePath = os.path.join(imageSessPath, fileName + '.png')
    else:
        equation = cherrypy.session['equation']
        display = cherrypy.session['display']
        fileName = 'Diagram-Circuit%s'%time.strftime('%y%m%d%H%M%S')
        imageSessPath = os.path.join(get_server_setting('imagesdir'), cherrypy.session.id)
        actions.append(new_action('actCircuitTikzSubmit', 'Circuit', id =cherrypy.session['circuit'].id))      
        actions.append(new_action('actCircuitTikzSubmit', 'Solver', id =cherrypy.session['circuit'].id))
    imagePath = os.path.join(imageSessPath, fileName + '.png')
    if not os.path.isdir(imageSessPath):
        os.mkdir(imageSessPath)
        cherrypy.log.error('Making directory %s'%imageSessPath, 'DIAGRAM', logging.DEBUG)
    if latex == '':
        return {'actions':actions}
    else:
        # look in cache for existing results
        storage = FileStorage.FileStorage(os.path.join(get_server_setting('cachedir'), 'cache.fs'))
        db = DB(storage)
        connection = db.open()
        root = connection.root()
        cache = root.cache_image.get(latex, None)
        if cache:
            with open(imagePath, 'wb+') as imgFile:  # ensure the image file handle is closed
                imgFile.write(cache.image)
            cache.update() # update timestamp
            root.cache_image[latex] = cache
            transaction.commit()
        else:            
            head =  \
"""\\documentclass[border=4pt]{standalone}
\\usepackage{tikz}
\\usepackage{circuitikz}
\\usepackage{siunitx}
\\pagestyle{empty}
\\begin{document}
\\begin{circuitikz}[%s]
\\newcommand*{\\doublelabel}[3][3em]{\\parbox{#1}{\\raggedleft #2 \\\\ #3}}
"""
            head = head%('american')
            tail = \
"""
\\end{circuitikz}
\\end{document}
"""
            latexDoc = head + latex + tail
            latexDoc = execute_latex_controls(latexDoc, equation, display)
            imagePath = os.path.join(imageSessPath, fileName + '.png')
            args = '-interaction=nonstopmode -halt-on-error -jobname %s -output-directory=%s '%(fileName, imageSessPath)
            # string -> tex file (TODO: fix, should render as a string)
            if texMode == 'file':
                texFileLoc = os.path.join(imageSessPath, fileName + '.tex')
                f = open(texFileLoc, 'w')
                f.write(latexDoc)
                f.close()
                # tex -> pdf processing
                cmd = '"%s" %s %s'%(os.path.join(latexPath, latexEngine), args, texFileLoc)
            elif texMode == 'string':
                # string -> pdf processing
                cmd = '"%s" %s "%s"'%(os.path.join(latexPath, latexEngine), args, latexDoc.replace('\n', ' '))
            cherrypy.log.error("Running %s"%cmd, 'DIAGRAM', logging.DEBUG)
            p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
            stdoutdata, stderrdata  = p.communicate()
            if stderrdata:
                cherrypy.log.error('Error: %s'%stderrdata, 'DIAGRAM', logging.ERROR)
            idx = stdoutdata.find('!')
            if idx > 0:
                cherrypy.log.error('Error: %s'%stdoutdata[idx:], 'DIAGRAM', logging.ERROR, True)
                raise Exception('Latex Error ' + stdoutdata[idx:])
            else:
                imgMagicCmd = 'convert -trim -density 128 "%s.pdf" -background none -undercolor none -pointsize 6 label:"               " -gravity SouthEast -append -pointsize 6 label:"(c) OneSolver.com" -gravity SouthEast -append "%s"'
                if not dest:
                    # latex did not have any errors, save to ram
                    if cherrypy.session.has_key('circuit'):
                        dbCircuit = cherrypy.session['circuit']
                    else:
                        session_new()
                    dbCircuit.latex = latex
                    dbCircuit.equation.instance = equation.instance
                    cherrypy.session['circuit'] = dbCircuit
                    # pdf -> png processing (imagemagick)   
                    cmd = imgMagicCmd%(os.path.join(imageSessPath, fileName), imagePath)
                    cherrypy.log.error('Running %s'%cmd, 'DIAGRAM', logging.DEBUG)
                    p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
                    stdoutdata, stderrdata  = p.communicate()
                    if stdoutdata:
                        cherrypy.log.error('Output: %s'%stdoutdata, 'DIAGRAM', logging.INFO)
                    if stderrdata:
                        cherrypy.log.error('Error: %s'%stderrdata, 'DIAGRAM', logging.ERROR)
                else:
                    cmd = imgMagicCmd%(os.path.join(imageSessPath, fileName), imagePath)
                    p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
                    stdoutdata, stderrdata  = p.communicate()
                    if stdoutdata:
                        cherrypy.log.error('Output: %s'%stdoutdata, 'DIAGRAM', logging.INFO)
                    if stderrdata:
                        cherrypy.log.error('Error: %s'%stderrdata, 'DIAGRAM', logging.ERROR)
                cache = ModelCore.CacheImage(imagePath)
                root.cache_diagram[latex] = cache
                transaction.commit()
        connection.close() #close cache
        db.close()
        storage.close()
        return {'actions':actions}
Example #13
 def opendb(self):
     addr = (self.options.host, self.options.port)
     self._storage = ClientStorage.ClientStorage(addr)
     self.db = DB(self._storage)
     self.poollock = Lock()
Example #14
 def __init__(self):
     storage = FileStorage.FileStorage('database.fs')
     db = DB(storage)
     self.dba = db
     db.pack()
     self.connection = db.open()
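The constructor above packs away all historical revisions at startup; DB.pack also accepts a cutoff, so an illustrative variant could keep recent history instead:

    # keep revisions from the last 7 days rather than packing everything
    # (DB.pack(t=None, days=0) treats `days` as a cutoff before `t`, or before now)
    db.pack(days=7)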
Example #15
#!/usr/bin/env python3
#-*-coding:utf-8-*-


from ZODB import DB


if __name__ == '__main__':
    # storage = FileStorage.FileStorage('test.zodb')
    db = DB('test.fs')  # a plain path is accepted; ZODB opens it as a FileStorage
    connection = db.open()
    root = connection.root()

    print(root)


    for key in sorted(root):
        print(root[key])


    import transaction
    transaction.commit()  # nothing was modified, so this commit is effectively a no-op
    db.close()



Example #16
    def get_structure_parametres(self, device_param):

        try:
            response = requests.get(self.params['device_page'])
            response.raise_for_status()
            # handling of 403, 404, etc. exceptions
        except HTTPError as http_err:
            print(f'HTTP error occurred: {http_err}')
        except Exception as err:
            print(f'Other error occurred: {err}')
        else:

            # start the webdriver
            driver = webdriver.Chrome()
            # pass the project's web page as the parameter
            driver.get(self.params['device_page'])
            # find the element matching the device parameter (device_param)
            element = driver.find_element_by_partial_link_text(device_param)

            parents_path = []
            new_path = '..'
            # find the parent element via XPath
            parent = element.find_element_by_xpath(new_path)
            # build the XPath up to the 'body' tag
            while (parent.tag_name != 'body'):
                parents_path.append(parent.tag_name)
                new_path += '/..'
                parent = element.find_element_by_xpath(new_path)

            xpath = '/'
            for e in parents_path[::-1]:
                xpath += "/" + e
            xpath += "/" + element.tag_name
            #   print('FIND PATH', xpath)
            # find the other devices via the same XPath
            result = driver.find_elements(By.XPATH, xpath)
            #   print(driver.find_elements(By.XPATH, xpath))
            # list that will hold the content of the device pages
            content_of_pages = []
            for i in result:
                content_of_pages.append(i.text)
            # print(content_of_pages)
            # list holding all links to the devices
            links = []
            years = []
            for i in result:
                links.append(i.get_attribute("href"))
            #  print(links)
            # collect information about each device
            for j in links:
                URL = j
                session = requests.Session()
                try:
                    request = session.get(URL)
                except pip._vendor.requests.exceptions.InvalidSchema:
                    print('No connection adapters')
                else:
                    soup = BeautifulSoup(request.text, 'html.parser')
                    c = soup.text
                    # collapse all device information into a single line
                    content = c.split()
                    content1 = " ".join(content)
                    # print(content1)
                    year_of_release = re.search(r'20\d{2}', content1)
                    if year_of_release:
                        years.append(year_of_release.group(0))

    # print(years)

        storage = FileStorage.FileStorage(
            'parametres_of_device.fs')  # create the parametres_of_device.fs file
        db = DB(storage)
        connection = db.open()  # open the database
        # the connection object exposes the root container via its root() method
        root = connection.root()
        root['year'] = years
        transaction.commit()  # persist the changes to the database
        connection.close()  # close the connection
        print(root.items())  # check what was stored in the database
Example #17
 def __init__(self, filename):
     storage = FileStorage.FileStorage(filename)
     self.db = DB(storage)
     self.conn = self.db.open()
     self.root = self.conn.root()
Example #18
 def open_storage(self, filename):
     storage = FileStorage.FileStorage(filename)
     db = DB(storage)
     connection = db.open()
     root = connection.root()
     return root, storage
Example #19
#
############################################################################
import os
import sys

import Globals  # for data
from ZODB import DB
from ZODB import FileStorage
import transaction

from Products.Sessions.BrowserIdManager import BrowserIdManager
from Products.Sessions.SessionDataManager import SessionDataManager
from Products.TemporaryFolder.TemporaryFolder import MountedTemporaryFolder

fs = FileStorage.FileStorage(os.path.join(Globals.data_dir, 'Data.fs.in'))
db = DB(fs)
conn = db.open()
root = conn.root()
app = root['Application']

print "Patching Data.fs.in"

tf = MountedTemporaryFolder('temp_folder', 'Temporary Folder')
app._setObject('temp_folder', tf)

bid = BrowserIdManager('browser_id_manager', 'Browser Id Manager')
app._setObject('browser_id_manager', bid)

sdm = SessionDataManager('session_data_manager',
                         title='Session Data Manager',
                         path='/temp_folder/transient_container',
Example #20
 def open_connection(self, file_name):
     self.file_storage = ZFS.FileStorage(file_name)
     db = DB(self.file_storage)
     self.conn = db.open()
     return self.conn
Example #21
def main():
    fs = FileStorage(u"dangle.fs")
    db = DB(fs)
    create_dangling_ref(db)
    db.close()
Example #22
#!/usr/bin/python

import os, sys, re, copy, logging, traceback
from ZODB.FileStorage import FileStorage
from ZODB import DB
from logging import debug, error

logging.getLogger().setLevel(logging.DEBUG)
logging.getLogger('ZODB').setLevel(logging.INFO)
D = os.path.expanduser('~/done/')

l = os.listdir(D)
dbs = [(x, D + x) for x in l if re.match(r'[pb]\d\d\d$', x)]
dbs.sort()
for (r, f) in dbs:
    try:
        c = DB(FileStorage(f)).open()
        root = c.root()
        assert len(root[r].scores) == root[r].final_gen_num + 1
        if '-d' in sys.argv:
            copy.deepcopy(root[r])
        c.close()
        c.db().close()
        debug('%s OK', r)
    except Exception:
        error('%s FAIL\n%s', r, traceback.format_exc())
Example #23
 def __init__(self, path):
     self.storage = FileStorage.FileStorage(path)
     self.db = DB(self.storage)
     self.connection = self.db.open()
     self.dbroot = self.connection.root()
Example #24
    def checkRestoreWithMultipleObjectsInUndoRedo(self):
        from ZODB.FileStorage import FileStorage

        # Undo creates backpointers in (at least) FileStorage.  ZODB 3.2.1
        # FileStorage._data_find() had an off-by-8 error, neglecting to
        # account for the size of the backpointer when searching a
        # transaction with multiple data records.  The results were
        # unpredictable.  For example, it could raise a Python exception
        # due to passing a negative offset to file.seek(), or could
        # claim that a transaction didn't have data for an oid despite
        # that it actually did.
        #
        # The former failure mode was seen in real life, in a ZRS secondary
        # doing recovery.  On my box today, the second failure mode is
        # what happens in this test (with an unpatched _data_find, of
        # course).  Note that the error can only "bite" if more than one
        # data record is in a transaction, and the oid we're looking for
        # follows at least one data record with a backpointer.
        #
        # Unfortunately, _data_find() is a low-level implementation detail,
        # and this test does some horrid white-box abuse to test it.

        is_filestorage = isinstance(self._storage, FileStorage)

        db = DB(self._storage)
        c = db.open()
        r = c.root()

        # Create some objects.
        r["obj1"] = MinPO(1)
        r["obj2"] = MinPO(1)
        transaction.commit()

        # Add x attributes to them.
        r["obj1"].x = 'x1'
        r["obj2"].x = 'x2'
        transaction.commit()

        r = db.open().root()
        self.assertEquals(r["obj1"].x, 'x1')
        self.assertEquals(r["obj2"].x, 'x2')

        # Dirty tricks.
        if is_filestorage:
            obj1_oid = r["obj1"]._p_oid
            obj2_oid = r["obj2"]._p_oid
            # This will be the offset of the next transaction, which
            # will contain two backpointers.
            pos = self._storage.getSize()

        # Undo the attribute creation.
        info = self._storage.undoInfo()
        tid = info[0]['id']
        t = Transaction()
        self._storage.tpc_begin(t)
        oids = self._storage.undo(tid, t)
        self._storage.tpc_vote(t)
        self._storage.tpc_finish(t)

        r = db.open().root()
        self.assertRaises(AttributeError, getattr, r["obj1"], 'x')
        self.assertRaises(AttributeError, getattr, r["obj2"], 'x')

        if is_filestorage:
            # _data_find should find data records for both objects in that
            # transaction.  Without the patch, the second assert failed
            # (it claimed it couldn't find a data record for obj2) on my
            # box, but other failure modes were possible.
            self.assert_(self._storage._data_find(pos, obj1_oid, '') > 0)
            self.assert_(self._storage._data_find(pos, obj2_oid, '') > 0)

            # The offset of the next ("redo") transaction.
            pos = self._storage.getSize()

        # Undo the undo (restore the attributes).
        info = self._storage.undoInfo()
        tid = info[0]['id']
        t = Transaction()
        self._storage.tpc_begin(t)
        oids = self._storage.undo(tid, t)
        self._storage.tpc_vote(t)
        self._storage.tpc_finish(t)

        r = db.open().root()
        self.assertEquals(r["obj1"].x, 'x1')
        self.assertEquals(r["obj2"].x, 'x2')

        if is_filestorage:
            # Again _data_find should find both objects in this txn, and
            # again the second assert failed on my box.
            self.assert_(self._storage._data_find(pos, obj1_oid, '') > 0)
            self.assert_(self._storage._data_find(pos, obj2_oid, '') > 0)

        # Indirectly provoke .restore().  .restore in turn indirectly
        # provokes _data_find too, but not usefully for the purposes of
        # the specific bug this test aims at:  copyTransactionsFrom() uses
        # storage iterators that chase backpointers themselves, and
        # return the data they point at instead.  The result is that
        # _data_find didn't actually see anything dangerous in this
        # part of the test.
        self._dst.copyTransactionsFrom(self._storage)
        self.compare(self._storage, self._dst)
Example #25
    def _PackWhileWriting(self, pack_now):
        # A storage should allow some reading and writing during
        # a pack.  This test attempts to exercise locking code
        # in the storage to test that it is safe.  It generates
        # a lot of revisions, so that pack takes a long time.

        db = DB(self._storage)
        conn = db.open()
        root = conn.root()

        for i in range(10):
            root[i] = MinPO(i)
        transaction.commit()

        snooze()
        packt = time.time()

        choices = list(range(10))
        for dummy in choices:
            for i in choices:
                root[i].value = MinPO(i)
                transaction.commit()

        # How many client threads should we run, and how long should we
        # wait for them to finish?  Hard to say.  Running 4 threads and
        # waiting 30 seconds too often left a thread still alive on Tim's
        # Win98SE box, during ZEO flavors of this test.  Those tend to
        # run one thread at a time to completion, and take about 10 seconds
        # per thread.  There doesn't appear to be a compelling reason to
        # run that many threads.  Running 3 threads and waiting up to a
        # minute seems to work well in practice.  The ZEO tests normally
        # finish faster than that, and the non-ZEO tests very much faster
        # than that.
        NUM_LOOP_TRIP = 50
        timer = ElapsedTimer(time.time())
        threads = [
            ClientThread(db, choices, NUM_LOOP_TRIP, timer, i)
            for i in range(3)
        ]
        for t in threads:
            t.start()

        if pack_now:
            db.pack(time.time())
        else:
            db.pack(packt)

        for t in threads:
            t.join(60)
        liveness = [t.is_alive() for t in threads]
        if True in liveness:
            # They should have finished by now.
            print('Liveness:', liveness)
            # Combine the outcomes, and sort by start time.
            outcomes = []
            for t in threads:
                outcomes.extend(t.outcomes)
            # each outcome list has as many of these as a loop trip got thru:
            #     thread_id
            #     elapsed millis at loop top
            #     elapsed millis at attempt to assign to self.root[index]
            #     index into self.root getting replaced
            #     elapsed millis when outcome known
            #     'OK' or 'Conflict'
            #     True if we got beyond this line, False if it raised an
            #         exception (one possible Conflict cause):
            #             self.root[index].value = MinPO(j)
            def sort_key(outcome):
                # sort by (elapsed millis at loop top, thread id)
                return (outcome[1], outcome[0])

            outcomes.sort(key=sort_key)
            counts = [0] * 4
            for outcome in outcomes:
                n = len(outcome)
                assert n >= 2
                tid = outcome[0]
                print('tid:%d top:%5d' % (tid, outcome[1]), end=' ')
                if n > 2:
                    print('commit:%5d' % outcome[2], end=' ')
                    if n > 3:
                        print('index:%2d' % outcome[3], end=' ')
                        if n > 4:
                            print('known:%5d' % outcome[4], end=' ')
                            if n > 5:
                                print('%8s' % outcome[5], end=' ')
                                if n > 6:
                                    print('assigned:%5s' % outcome[6], end=' ')
                counts[tid] += 1
                if counts[tid] == NUM_LOOP_TRIP:
                    print('thread %d done' % tid, end=' ')
                print()

            self.fail('a thread is still alive')

        self._sanity_check()
Example #26
 def __init__(self, file):
     self.file = file
     self.db = DB(None)  # None gives an in-memory storage; self.file is not used here
     self.connection = self.db.open()
     self.root = self.connection.root()
Example #27
 def __init__(self, addr=('localhost', 9100)):
     self.addr = addr
     self.storage = ClientStorage.ClientStorage(self.addr)
     self.db = DB(self.storage)
     self.conn = self.db.open()
     self.root = self.conn.root()
Example #28
	def __init__(self):
		self.storage=FileStorage.FileStorage('Data.fs')
		self.db=DB(self.storage)
		self.conexion=self.db.open()
		self.raiz=self.conexion.root()
Example #29
"""creates the battle log object. VERY DESTRUCTIVE."""
from ZEO import ClientStorage
from ZODB import DB
import transaction
import persistent.mapping
#from equanimity.world import wField, wPlayer, World

addr = 'localhost', 9101
storage = ClientStorage.ClientStorage(addr)
db = DB(storage)
conn = db.open()
logs = conn.root()


#TODO: index logs by when and where the battle happened.
def create():
    logs['battle'] = persistent.mapping.PersistentMapping()
    transaction.commit()
Example #30
    result = convert_size(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)

    return result


class cdict(dict):
    def __init__(self, *args, **kwargs):
        super().__init__()
        self._p_changed = 0


storage = FileStorage.FileStorage(os.path.join(BASE_DIR, "storage",
                                               "Storage.fs"),
                                  pack_keep_old=False)
zopedb = DB(storage)
connection = zopedb.open()

root = connection.root()
# breakpoint()

# requests_cache.install_cache('demo_cache')

headers = {
    'x-rapidapi-host': "api-football-v1.p.rapidapi.com",
    'x-rapidapi-key': TOKEN
}


def error_connection_continue_work(f):
    def wrapper(*args, **kwargs):