Example #1
File: db.py Project: MSF-OCB/ufload
def manual_sync(args, sync_server, db):
    if db.startswith('SYNC_SERVER'):
        return 0
    ufload.progress("manual sync instance %s to sync server %s" % (db, sync_server))
    netrpc = connect_rpc(args, db)
    sync_obj = netrpc.get('sync.client.sync_manager')

    sync_ids = sync_obj.search([])
    sync_obj.sync(sync_ids)
Example #2
def _decrypt(pwd):
    pwd = pwd.strip()
    # Drop the 4-character prefix; the remainder is base64-encoded
    x = pwd[4:]
    try:
        x = base64.b64decode(x)
        return x
    except Exception:
        ufload.progress('Unable to decode password')
        print sys.exc_info()[0]
Example #3
File: db.py Project: MSF-OCB/ufload
def _zipChecksum(path):
    ufload.progress("Validating patch checksum")
    with open(path, 'rb') as f:
        contents = f.read()
        # md5 accepts only chunks of 128*N bytes
        md5 = hashlib.md5()
        for i in range(0, len(contents), 8192):
            md5.update(contents[i:i + 8192])
    return md5.hexdigest()
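Note that _zipChecksum reads the entire patch into memory before hashing it in 8192-byte slices. A minimal streaming sketch (not part of the project) that feeds hashlib.md5 directly from the file would avoid that:

import hashlib

def _zipChecksumStreaming(path, chunk_size=8192):
    # Hypothetical variant: hash the patch chunk by chunk without reading it all at once
    md5 = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            md5.update(chunk)
    return md5.hexdigest()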
Example #4
def activate_silentupgrade(args, db):
    rc = psql(
        args,
        'update sync_client_sync_server_connection set automatic_patching = \'t\';',
        db)

    if not args.autosync:
        ufload.progress(
            "*** WARNING: Silent upgrade is enabled, but auto sync is not.")

    return rc
Example #5
File: db.py Project: lermit4/ufload
def installUserRights(args, db='SYNC_SERVER_LOCAL'):
    ufload.progress('Install user rights: {}'.format(args.user_rights_zip))
    if not args.user_rights_zip or not os.path.isfile(args.user_rights_zip):
        raise ValueError('The file {} does not exist'.format(args.user_rights_zip))

    f = open(args.user_rights_zip, 'rb')
    plain_zip = f.read()
    f.close()
    # ur_name = args.user_rights_zip.split('.')[0]
    ur_name, ur_name_extension = os.path.splitext(args.user_rights_zip)
    context = {'run_foreground': True}
    netrpc = connect_rpc(args, db)

    sync_obj = netrpc.get('sync_server.user_rights.add_file')
    # netrpc.config['run_foreground'] = True
    ufload.progress("Download User Rights")
    sync_ids = sync_obj.search([])
    # result = sync_obj.import_zip(sync_ids, {'name': ur_name, 'zip_file': encodestring(plain_zip)})

    load_id = sync_obj.create({'name': ur_name, 'zip_file': encodestring(plain_zip)})
    result = sync_obj.import_zip([load_id], context)
    result = sync_obj.read(load_id, ['state', 'message'])
    if result['state'] != 'done':
        ufload.progress('Unable to load UR: %s' % result['message'])
        raise oerplib.error.RPCError(result['message'])
    else:
        result = sync_obj.done( [load_id])
        ufload.progress('New UR file loaded')
        return result

    # loader = self.pool.get('sync_server.user_rights.add_file')
    # load_id = loader.create(cr, uid, {'name': ur_name, 'zip_file': encodestring(plain_zip)}, context=context)
    # loader.import_zip(cr, uid, [load_id], context=context)

    return result
Example #6
def _cmdRestore(args):
    # if args.sync:
    #     if not _required(args, [ 'syncuser', 'syncpw' ]):
    #         return 2

    if args.autosync is not None:
        if not _required(args, [ 'sync' ]):
            if not _required(args, [ 'synclight' ]):
                ufload.progress("Load sync server (-load-sync-server or -load-sync-server-no-update) argument is mandatory for auto-sync")
                return 2

    # if the parameter nopwreset is not defined, adminpw and userspw are mandatory.
    if not args.nopwreset:
        if args.adminpw is None or args.userspw is None:
            ufload.progress("-adminpw AND -userspw are mandatory if -nopwreset is not set")
            return 2

    if args.file is not None:
        rc, dbs = _fileRestore(args)
    elif args.dir is not None:
        rc, dbs = _dirRestore(args)
    else:
        rc, dbs = _multiRestore(args)

    if rc != 0:
        return rc

    ss = 'SYNC_SERVER_LOCAL'
    if args.ss is not None:
        ss = args.ss
        if args.db_prefix:
            ss = '%s_%s' % (args.db_prefix, ss)

    if args.sync or args.synclight:
        # Restore a sync server (LIGHT WITH MASTER)
        rc = _syncRestore(args, dbs, ss)

    if args.sync or args.synclight or args.autosync or args.ss is not None:
        # Update instances sync settings
        for db in dbs:
            ufload._progress("Connection settings for %s" % db)
            #Defines sync server connection settings on each instance
            ufload.db.sync_server_settings(args, ss, db)
            if args.sync or args.autosync or args.synclight or args.ss is not None:
                #Connects each instance to the sync server (and sets pwd)
                ufload.db.connect_instance_to_sync_server(args, ss, db)

        _syncLink(args, dbs, ss)

    return rc
Example #7
File: db.py Project: lermit4/ufload
def sync_link(args, hwid, db, sdb, all=False):
    instance = _db_to_instance(args, db)
    #Create the instance in the sync server if it does not already exist
    rc = psql(args, 'insert into sync_server_entity (create_uid, create_date, write_date, write_uid, user_id, name, state) SELECT 1, now(), now(), 1, 1, \'%s\', \'validated\' FROM sync_server_entity WHERE NOT EXISTS (SELECT 1 FROM sync_server_entity WHERE name = \'%s\') ' % (instance, instance), sdb )

    if rc != 0:
        ufload.progress('Unable to create the instance %s on the sync server. Please add it manually.' % instance)
        #return rc

    if all:
        # Update hardware id for every instance
        return psql(args, 'update sync_server_entity set hardware_id = \'%s\';' % hwid, sdb)
    else:
        #Update hardware id for this instance
        return psql(args, 'update sync_server_entity set hardware_id = \'%s\' where name = \'%s\';' % (hwid, instance), sdb)
Example #8
File: db.py Project: lermit4/ufload
def _run(args, cmd, get_out=False, silent=False):
    if args.show:
        ufload.progress("Would run: " + str(cmd))
        rc = 0
    else:
        if silent or get_out:
            out = ""
            try:
                out = subprocess.check_output(cmd, env=pg_pass(args), stderr=subprocess.STDOUT)
                return 0, out
            except subprocess.CalledProcessError as exc:
                return exc.returncode, exc.output
        else:
            rc = subprocess.call(cmd, env=pg_pass(args))
    return rc
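Note the two return shapes: _run returns a plain exit code by default, but a (returncode, output) tuple when get_out or silent is set (and always a plain code when args.show is on). A hedged usage sketch with illustrative commands:

# Illustrative only; assumes args.show is False and args carries what pg_pass() needs
rc = _run(args, ['pg_isready'])                             # plain exit code
rc, out = _run(args, ['psql', '--version'], get_out=True)   # (exit code, captured output)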
Example #9
File: db.py Project: lermit4/ufload
def connect_instance_to_sync_server(args, sync_server, db):
    #Temporary desactivation of auto-connect
    #return 0

    # if db.startswith('SYNC_SERVER'):
    #    return 0

    #oerp = oerplib.OERP('127.0.0.1', protocol='xmlrpc', port=12173, version='6.0')
    ufload.progress('Connecting instance %s to %s' % (db, sync_server))
    #netrpc = oerplib.OERP('127.0.0.1', protocol='xmlrpc', port=12173, timeout=1000, version='6.0')
    netrpc = oerplib.OERP('127.0.0.1', protocol='xmlrpc', port=8069, timeout=1000, version='6.0')
    netrpc.login(args.adminuser.lower(), args.adminpw, database=db)
    conn_manager = netrpc.get('sync.client.sync_server_connection')
    conn_ids = conn_manager.search([])
    conn_manager.write(conn_ids, {'password': args.adminpw})
    conn_manager.connect()
Example #10
File: db.py Project: lermit4/ufload
def clean(args, db):
    toClean = {}
    toKeep = {}

    i = _db_to_instance(args, db)
    toClean[i] = True
    toKeep[db] = True

    for d in _allDbs(args):
        i = _db_to_instance(args, d)
        #if not args.db_prefix and i and d not in toKeep and i in toClean:
        if i and d not in toKeep and i in toClean:
            ufload.progress("Cleaning other database for instance %s: %s" % (i, d))
            killCons(args, d)
            rc = psql(args, 'DROP DATABASE IF EXISTS \"%s\"'%d)
            if rc != 0:
                return rc
    return 0
Example #11
def _cmdRestore(args):
    # if args.sync:
    #     if not _required(args, [ 'syncuser', 'syncpw' ]):
    #         return 2

    if args.autosync is not None:
        if not _required(args, ['sync']):
            ufload.progress(
                "Load sync server (-load-sync-server) argument is mandatory for auto-sync"
            )
            return 2

    if args.file is not None:
        rc, dbs = _fileRestore(args)
    elif args.dir is not None:
        rc, dbs = _dirRestore(args)
    else:
        rc, dbs = _multiRestore(args)

    if rc != 0:
        return rc

    ss = 'SYNC_SERVER_LOCAL'
    if args.ss:
        ss = args.ss

    if args.sync or args.synclight:
        # Restore a sync server (LIGHT WITH MASTER)
        rc = _syncRestore(args, dbs, ss)

    if args.sync is not None or args.synclight is not None or args.autosync is not None or args.ss is not None:
        # Update instances sync settings
        for db in dbs:
            ufload._progress("Connection settings for %s" % db)
            #Defines sync server connection settings on each instance
            ufload.db.sync_server_settings(args, ss, db)
            if args.sync or args.autosync or args.synclight:
                #Connects each instance to the sync server (and sets pwd)
                ufload.db.connect_instance_to_sync_server(args, ss, db)

        _syncLink(args, dbs, args.ss)

    return rc
Example #12
File: cloud.py Project: lermit4/ufload
def _group_files_to_download(files):
    files.sort()
    files.reverse()
    ret = collections.defaultdict(lambda: [])

    for a in files:
        t, f, u = a
        #if '/' not in f:
        #   raise Exception("no slash in %s" % f)

        #isplit = f.rindex('/')
        #filename = f[isplit+1:]
        if '-' not in f:
            ufload.progress("filename is missing expected dash: " + f)
            continue

        instance = '-'.join(f.split('-')[:-1])
        ret[instance].append((u, f))

    return ret
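A hedged illustration of the shapes involved: the input is a list of (timestamp, filename, url) tuples, and the result maps each instance name (the filename up to its last dash) to its (url, filename) pairs, newest first. The names and timestamps below are made up:

files = [
    ('2016-03-14', 'OCB_HQ1-20160314.zip', 'https://example.invalid/a'),
    ('2016-04-01', 'OCB_HQ1-20160401.zip', 'https://example.invalid/b'),
]
grouped = _group_files_to_download(files)
# grouped['OCB_HQ1'] == [('https://example.invalid/b', 'OCB_HQ1-20160401.zip'),
#                        ('https://example.invalid/a', 'OCB_HQ1-20160314.zip')]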
Example #13
def _syncLink(args, dbs, sdb):
    ufload.progress("Updating hardware id...")
    # Arrange that all instances use admin as the sync user
    ufload.db.sync_server_all_admin(args, sdb)

    # Hook up all the databases we are currently working on
    hwid = ufload.db.get_hwid(args)
    if hwid is None:
        ufload.progress(
            "No hardware id available, you will need to manually link your instances to %s."
            % sdb)
        return 0

    if args.ss and (args.sync is None and args.synclight is None):
        #We don't update hardware id for all local instances: instances from another server could be already connected
        all = False
    else:
        # We update hardware id for all local instances: it's a new sync server, so no instance is connected yet
        all = True
        ufload.db.psql(
            args, 'update sync_server_entity set hardware_id = \'%s\';' % hwid,
            sdb)

    for db in dbs:
        ufload.progress(
            "Updating hardware id and entity name for %s in sync server" % db)
        rc = ufload.db.sync_link(
            args, hwid, db, sdb, all
        )  #Update hardware_id and entity name (of the instance) in sync server db
        if rc != 0:
            return rc
    return 0
Example #14
def _fileRestore(args):
    # Find the instance name we are loading into
    if args.i is not None:
        if len(args.i) != 1:
            ufload.progress("Expected only one -i argument.")
            return 3, None
        db = args.i[0]
    else:
        db = _file_to_db(args, args.file)
        if db is None:
            ufload.progress(
                "Could not set the instance from the filename. Use -i to specify it."
            )
            return 3, None

    try:
        statinfo = os.stat(args.file)
    except OSError as e:
        ufload.progress("Could not find file size: " + str(e))
        return 1, None

    with open(args.file, 'rb') as f:
        rc = ufload.db.load_dump_into(args, db, f, statinfo.st_size)

    if not args.noclean:
        rc = ufload.db.clean(args, db)

    if args.notify:
        subprocess.call([args.notify, db])

    if rc == 0:
        return 0, [db]
    else:
        return rc, None
Example #15
def _get_all_files_and_timestamp(dav, d):
    ufload.progress('Listing files from dir %s' % d)
    try:
        #all_zip = dav.ls(d)
        all_zip = dav.list(d)
    except Exception as e:
        ufload.progress("Cloud Exception 88")
        logging.warn(str(e))
        return []

    ret = []
    for f in all_zip:
        #if not f['Name'] or f['Name'][-1] == '/':
        if not f['Name']:
            continue

        # We try to extract a timestamp to get an idea of the creation date
        #  Format: 2016-03-14T03:31:40Z
        t = time.strptime(f['TimeLastModified'], '%Y-%m-%dT%H:%M:%SZ')

        # We don't take into consideration backups that are too recent.
        # Otherwise they could be half uploaded (=> corrupted)
        if abs(time.time() - time.mktime(t)) < 900:
            continue

        ufload.progress('File found: %s' % f['Name'])

        if f['Name'].split(".")[-1] != "zip":
            logging.warn("Ignoring non-zipfile: %s" % f['Name'])
            continue
        ret.append((t, f['Name'], f['ServerRelativeUrl']))
    return ret
Example #16
File: cloud.py Project: lermit4/ufload
def peek_inside_file(path, fn, **kwargs):
    '''host, directory = _splitCloudName(kwargs['where'])
    dav = easywebdav.connect(host,
                            username=kwargs['user'],
                            password=kwargs['pw'],
                            protocol='https')
    '''
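    # NOTE: assumes a WebDAV connection object named 'dav' exists in the enclosing
    # module scope; the commented-out block above shows how such a connection is built.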

    try:
        z = zipfile.ZipFile(
            ufload.httpfile.HttpFile(dav.baseurl + path, dav.session.auth[0],
                                     dav.session.auth[1]))
    except Exception as e:
        ufload.progress("Zipfile %s: could not read: %s" % (fn, e))
        return None

    names = z.namelist()
    if len(names) == 0:
        ufload.progress("Zipfile %s has no files in it." % fn)
        return None
    if len(names) != 1:
        ufload.progress("Zipfile %s has unexpected files in it: %s" %
                        (fn, names))
        return None
    n = names[0]
    z.close()
    return n
Example #17
File: db.py Project: lermit4/ufload
def installPatch(args, db='SYNC_SERVER_LOCAL'):
    ufload.progress("Activating update_client module on %s database" % db)
    #Install the module update_client
    rc = psql(args, "UPDATE ir_module_module SET state = 'installed' WHERE name = 'update_client'", db)
    if rc != 0:
        return rc

    v = args.version
    ufload.progress("Installing v.%s patch on %s database" % (v, db))

    patch = os.path.normpath(args.patch)

    checksum = _zipChecksum(patch)

    rc, out = psql(args, "SELECT 1 FROM sync_server_version WHERE sum ='{}';".format(checksum), db, True)
    if not out.strip() and rc == 0:
        contents = base64.b64encode(_zipContents(patch))

        sql = "INSERT INTO sync_server_version (create_uid, create_date, write_date, write_uid, date, state, importance, name, comment, sum, patch) VALUES (1, NOW(), NOW(), 1, NOW(),  'confirmed', 'required', '%s', 'Version %s installed by ufload', '%s', '%s')" % (v, v, checksum, contents)
        # ufload.progress(sql)
        # Write sql to a file
        with open('sql.sql', 'w') as f:
            f.write(sql)

        rc = psql_file(args, 'sql.sql', db)
        os.remove('sql.sql')

        if rc != 0:
            return rc
        return 0
    else:
        ufload.progress("The v.%s patch on %s database is already installed!!" % (v, db))
        return -1
Example #18
def _syncRestore(args, dbs, ss):
    if args.db_prefix:
        sdb = '%s_%s' % (args.db_prefix, ss)
    else:
        sdb = ss

    #Which Sync Server do we need?
    if args.synclight:
        #url = "http://sync-prod_dump.uf5.unifield.org/SYNC_SERVER_LIGHT_WITH_MASTER"
        url = "http://sync-prod_dump.rb.unifield.org/SYNC_SERVER_LIGHT_NO_UPDATE"
    else:
        url = "http://sync-prod_dump.rb.unifield.org/SYNC_SERVER_LIGHT_WITH_MASTER"

    try:
        r = requests.head(url,
                          auth=requests.auth.HTTPBasicAuth(
                              args.syncuser, args.syncpw))
        if r.status_code != 200:
            ufload.progress("HTTP HEAD error: %s" % r.status_code)
            return 1
    except KeyboardInterrupt as e:
        raise e
    except Exception as e:
        ufload.progress("Failed to fetch sync server: " + str(e))
        return 1

    sz = int(r.headers.get('content-length', 0))
    szdb = ufload.db.get_sync_server_len(args, sdb)

    if szdb == sz:
        ufload.progress("Sync server is up to date.")
        return 0

    r = requests.get(url,
                     auth=requests.auth.HTTPBasicAuth(args.syncuser,
                                                      args.syncpw),
                     stream=True)
    if r.status_code != 200:
        ufload.progress("HTTP GET error: %s" % r.status_code)
        return 1

    rc = ufload.db.load_dump_into(args, sdb, r.raw, sz)
    if rc != 0:
        return rc
    ufload.db.write_sync_server_len(args, sz, sdb)

    if not args.noclean:
        rc = ufload.db.clean(args, sdb)

    return _syncLink(args, dbs, sdb)
Example #19
File: db.py Project: MSF-OCB/ufload
def get_hwid(args):
    if sys.platform == 'win32':
        import _winreg
        try:
            with _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,
                                 "SYSTEM\ControlSet001\services\eventlog\Application\openerp-web-6.0",
                                 0, _winreg.KEY_READ) as registry_key:
                hwid, regtype = _winreg.QueryValueEx(registry_key, "HardwareId")
                ufload.progress("Hardware id from registry key: %s" % hwid)
                return hwid
        except WindowsError:
            return None
    else:
        # Follow the same algorithm that Unifield uses (see sync_client.py)
        mac = []
        for line in os.popen("/sbin/ifconfig"):
            if line.find('Ether') > -1:
                mac.append(line.split()[4])

        mac.sort()
        hw_hash = hashlib.md5(''.join(mac)).hexdigest()
        return hw_hash
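On non-Windows hosts the hardware id is just the md5 of the sorted, concatenated MAC addresses. A minimal illustration of that hashing step, using made-up addresses:

import hashlib

mac = ['00:16:3e:dd:ee:ff', '00:16:3e:aa:bb:cc']   # fake MAC addresses
mac.sort()
print hashlib.md5(''.join(mac)).hexdigest()        # same hashing step as get_hwid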
Example #20
def cleanDbs(args):

    import re
    p = re.compile('^[A-Z0-9_]{5,}_[0-9]{8}_[0-9]{4}$')
    ps = re.compile('SYNC')

    nb = 0
    for d in _allDbs(args):

        m = p.match(d)
        ms = ps.search(d)

        if m is None and ms is None and d != '':
            ufload.progress("Dropping database %s" % d)
            killCons(args, d)
            rc = psql(args, 'DROP DATABASE IF EXISTS \"%s\"' % d)
            if rc != 0:
                ufload.progress("Error: unable to drop database %s" % d)
            else:
                nb = nb + 1

    return nb
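cleanDbs drops every database whose name neither looks like an instance dump (INSTANCE_YYYYMMDD_HHMM) nor contains SYNC. A hedged illustration with hypothetical database names:

import re

p = re.compile('^[A-Z0-9_]{5,}_[0-9]{8}_[0-9]{4}$')
ps = re.compile('SYNC')

assert p.match('OCB_HQ1_20160314_0331')                         # kept: instance dump naming
assert ps.search('SYNC_SERVER_LOCAL')                           # kept: sync-related
assert not p.match('postgres') and not ps.search('postgres')    # would be dropped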
Example #21
File: cloud.py Project: lermit4/ufload
def get_onedrive_connection(args):
    info = get_cloud_info(args)
    if not info.get('url'):
        ufload.progress('URL is not set!')
    if not info.get('login'):
        ufload.progress('login is not set!')
    if not info.get('password'):
        ufload.progress('Password is not set!')

    url = urlparse(info['url'])
    if not url.netloc:
        ufload.progress('Unable to parse url: %s' % info['url'])

    path = info.get('site') + url.path

    try:
        dav = webdav.Client(url.netloc,
                            port=url.port,
                            protocol=url.scheme,
                            username=info['login'],
                            password=info['password'],
                            path=path)
        return dav
    except webdav.ConnectionFailed, e:
        ufload.progress('Unable to connect: %s' % e.message)
Example #22
def _dirRestore(args):
    files = os.listdir(args.dir)
    dbs = []
    atleastone = False

    for file in files:
        db = _file_to_db(args, file)
        fullfile = '%s/%s' % (args.dir, file)
        if db is None:
            ufload.progress("Could not set the instance from the file %s." %
                            file)
            continue

        dbs.append(db)
        atleastone = True

        try:
            statinfo = os.stat(fullfile)
            sz = statinfo.st_size
        except OSError as e:
            ufload.progress("Could not find file size: " + str(e))
            sz = 0
            return 1, None

        with open(fullfile, 'rb') as f:
            rc = ufload.db.load_dump_into(args, db, f, sz)

        if not args.noclean:
            rc = ufload.db.clean(args, db)

        if args.notify:
            subprocess.call([args.notify, db])

    if atleastone:
        return 0, dbs
    else:
        return 2, None
Example #23
File: db.py Project: lermit4/ufload
def manual_upgrade(args, sync_server, db):
    if db.startswith('SYNC_SERVER'):
        return 0
    ufload.progress("manual update instance %s to sync server %s" % (db, sync_server))
    netrpc = connect_rpc(args, db)
    sync_obj = netrpc.get('sync_client.upgrade')

    ufload.progress("Download patch")
    sync_ids = sync_obj.search([])
    result = sync_obj.download(sync_ids)
    if result:
        ufload.progress("update Unifield")
        result = sync_obj.do_upgrade(sync_ids)
    return result
Example #24
def connect_instance_to_sync_server(args, sync_server, db):
    #Temporary desactivation of auto-connect
    #return 0

    # if db.startswith('SYNC_SERVER'):
    #    return 0
    if 'SYNC_SERVER' in db:
        return 0

    port = 8069
    if args.sync_xmlrpcport:
        port = int(args.sync_xmlrpcport)
    _run_out(
        args,
        mkpsql(
            args, "update res_users set password='%s' where login = '%s';" %
            (args.connectionpw, args.connectionuser.lower()), sync_server))

    try:
        #oerp = oerplib.OERP('127.0.0.1', protocol='xmlrpc', port=12173, version='6.0')
        ufload.progress('Connecting instance %s to %s' % (db, sync_server))
        #netrpc = oerplib.OERP('127.0.0.1', protocol='xmlrpc', port=12173, timeout=1000, version='6.0')
        #netrpc = oerplib.OERP('127.0.0.1', protocol='xmlrpc', port=8069, timeout=1000, version='6.0')
        netrpc = oerplib.OERP('127.0.0.1',
                              protocol='xmlrpc',
                              port=port,
                              timeout=1000,
                              version='6.0')
        netrpc.login(args.adminuser.lower(), args.adminpw, database=db)
        conn_manager = netrpc.get('sync.client.sync_server_connection')
        conn_ids = conn_manager.search([])
        #conn_manager.write(conn_ids, {'password': args.adminpw})
        conn_manager.write(conn_ids, {
            'login': args.connectionuser,
            'password': args.connectionpw
        })
        conn_manager.connect()
        #netrpc.get('sync.client.entity').sync()
    except oerplib.error.RPCError as e:
        ufload.progress(
            "Error: unable to connect instance to the sync server: %s" %
            e.args[0])
    except:
        ufload.progress(
            "Unexpected error: unable to connect instance to the sync server: %s"
            % sys.exc_info()[0])
Example #25
def main():
    global args
    args = parse()
    rc = 0
    if hasattr(args, "func"):
        try:
            rc = args.func(args)
        except KeyboardInterrupt:
            rc = 1

    ufload.progress("ufload is done working :-)")

    if args.remote:
        import socket
        hostname = socket.gethostname() or 'unknown'
        ufload.progress("Will exit with result code: %d" % rc)
        ufload.progress("Posting logs to remote server.")
        requests.post(args.remote+"?who=%s"%hostname, data='\n'.join(_logs))

    sys.exit(rc)
Example #26
File: cloud.py Project: lermit4/ufload
def peek_inside_local_file(path, fn):
    try:
        z = zipfile.ZipFile(fn)
    except Exception as e:
        ufload.progress("Zipfile %s: could not read: %s" % (fn, e))
        return None

    names = z.namelist()
    if len(names) == 0:
        ufload.progress("Zipfile %s has no files in it." % fn)
        return None
    if len(names) != 1:
        ufload.progress("Zipfile %s has unexpected files in it: %s" %
                        (fn, names))
        return None
    n = names[0]
    z.close()
    del z
    return n
Example #27
File: cloud.py Project: lermit4/ufload
def dlProgress(pct):
    ufload.progress("Downloaded %d%%" % pct)
Example #28
def archive(args):
    v = ver(args)
    if len(v) < 1 or '9.5' not in v[0]:
        ufload.progress('Postgres 9.5 is required.')
        return 1

    for dsn in args.from_dsn:
        x = _parse_dsn(dsn)
        if 'dbname' not in x:
            ufload.progress('DSN is missing dbname.')
            return 1

        ufload.progress("Archive operations_event from %s" % x['dbname'])
        rc, out = _run(args,
                       mkpsql(
                           args, '''
create extension if not exists dblink;
insert into operations_event (instance, kind, time, remote_id, data)
  select * from
    dblink('%s', 'select instance, kind, time, id, data from operations_event') as
    table_name_is_ignored(instance character varying(64),
       kind character varying(64),
       time timestamp without time zone,
       id integer,
       data text)
    on conflict do nothing;''' % (dsn, ), 'archive'),
                       get_out=True)
        ufload.progress(_clean(out))

        ufload.progress("Archive operations_count from %s" % x['dbname'])
        rc, out = _run(args,
                       mkpsql(
                           args, '''
create extension if not exists dblink;
insert into operations_count (instance, kind, time, count, remote_id)
  select * from
    dblink('%s', 'select instance, kind, time, count, id from operations_count') as
    table_name_is_ignored(instance character varying(64),
       kind character varying(64),
       time timestamp without time zone,
       count integer,
       id integer)
    on conflict do nothing;''' % (dsn, ), 'archive'),
                       get_out=True)
        ufload.progress(_clean(out))
Example #29
def delive(args, db):
    if args.live:
        ufload.progress(
            "*** WARNING: The restored database has LIVE passwords and LIVE syncing and LIVE settings for automated imports/exports."
        )
        if args.sync:
            ufload.progress(
                "(please note that ufload is not able to connect to the sync server using live passwords, please connect manually)"
            )
        return 0

    adminuser = args.adminuser.lower()
    port = 8069
    if args.sync_xmlrpcport:
        port = int(args.sync_xmlrpcport)

    ss = 'SYNC_SERVER_LOCAL'
    if args.ss:
        ss = args.ss

    # change the sync config to local
    if args.db_prefix:
        pfx = args.db_prefix + '_'
    else:
        pfx = ''
    rc = psql(
        args,
        'update sync_client_sync_server_connection set automatic_patching = \'f\', protocol = \'xmlrpc\', login = \'%s\', database = \'%s%s\', host = \'127.0.0.1\', port = %d;'
        % (adminuser, pfx, ss, port), db)
    if rc != 0:
        return rc

    # disable cron jobs
    rc = psql(
        args,
        'update ir_cron set active = \'f\' where model = \'backup.config\';',
        db)
    if rc != 0:
        return rc
    rc = psql(
        args,
        'update ir_cron set active = \'f\' where model = \'msf.instance.cloud\';',
        db)
    if rc != 0:
        return rc
    rc = psql(
        args,
        'update ir_cron set active = \'f\' where model = \'sync.client.entity\';',
        db)
    if rc != 0:
        return rc
    rc = psql(
        args,
        'update ir_cron set active = \'f\' where model = \'stock.mission.report\';',
        db)
    if rc != 0:
        return rc

    #Automated import jobs
    rc = psql(
        args,
        'update ir_cron set active = \'f\' where model = \'automated.import\';',
        db)
    if rc != 0:
        return rc
    # Automated import settings
    rc = psql(
        args,
        'UPDATE automated_import SET report_path=\'\', src_path=\'\', ftp_url=\'\', dest_path=\'\', ftp_ok=\'f\', ftp_port=\'\',dest_path_failure=\'\', ftp_login=\'\', ftp_password=\'\', ftp_protocol=\'\';',
        db)
    #if rc != 0:
    #    return rc

    # Automated export jobs
    rc = psql(
        args,
        'update ir_cron set active = \'f\' where model = \'automated.export\';',
        db)
    if rc != 0:
        return rc
    # Automated export settings
    rc = psql(
        args,
        'UPDATE automated_export SET report_path=\'\', ftp_url=\'\', dest_path=\'\', ftp_ok=\'f\', ftp_port=\'\',dest_path_failure=\'\', ftp_login=\'\', ftp_password=\'\', ftp_protocol=\'\';',
        db)
    #if rc != 0:
    #    return rc

    # Now we check for arguments allowing auto-sync and silent-upgrade
    if args.autosync:
        activate_autosync(args, db, ss)
        rc = psql(
            args,
            'update ir_cron set active = \'t\', interval_type = \'hours\', interval_number = 2, nextcall = current_timestamp + interval \'1 hour\' where model = \'sync.client.entity\' and function = \'sync_threaded\';',
            db)
        if rc != 0:
            return rc
        rc = psql(
            args,
            'update sync_client_sync_server_connection SET host = \'127.0.0.1\', database = \'%s\';'
            % ss, db)

    if args.silentupgrade:
        if not args.autosync:
            ufload.progress(
                "*** WARNING: Silent upgrade is enabled, but auto sync is not."
            )
        rc = psql(
            args,
            'update sync_client_sync_server_connection set automatic_patching = \'t\';',
            db)
        if rc != 0:
            return rc

    # Set the backup directory
    directory = "E'd:\\\\'"
    if sys.platform != "win32" and args.db_host in [None, 'ct0', 'localhost']:
        # when loading on non-windows, to a local database, use /tmp
        directory = '\'/tmp\''

    rc = psql(
        args,
        'update backup_config set beforemanualsync=\'f\', beforepatching=\'f\', aftermanualsync=\'f\', beforeautomaticsync=\'f\', afterautomaticsync=\'f\', scheduledbackup=\'f\', name = %s;'
        % directory, db)
    if rc != 0:
        return rc

    # put the chosen password into all users
    if args.userspw:
        rc = psql(
            args, 'update res_users set password = \'%s\' WHERE id <> 1;' %
            args.userspw, db)

    if args.adminpw:
        rc = psql(
            args, 'update res_users set password = \'%s\' WHERE id = 1;' %
            args.adminpw, db)

    # else:
    #    rc = psql(args, 'update res_users set password = \'%s\';' % args.adminpw, db)

    if args.nopwreset:
        ufload.progress(
            "*** WARNING: The restored database has LIVE passwords.")
        return 0

    # set the username of the admin account
    rc = psql(args,
              'update res_users set login = \'%s\' where id = 1;' % adminuser,
              db)
    if rc != 0:
        return rc

    # put the chosen password into all users
    #rc = psql(args, 'update res_users set password = \'%s\';' % args.adminpw, db)
    if rc != 0:
        return rc

    if args.inactiveusers:
        rc = psql(
            args,
            "update res_users set active = 'f' where login not in ('synch', '%s');"
            % adminuser, db)

    if args.createusers:
        if args.adminpw != args.userspw:
            newpass = args.userspw
        else:
            newpass = args.adminpw

        if args.newuserspw:
            db_name = db
            if args.db_prefix:
                db_name = db_name.split(args.db_prefix + '_', 1)[1]
            new_pass_dict = []
            for pass_part in re.split(r'(\[\d+\+\d+\])', args.newuserspw):
                m = re.search(r'\[(\d+)\+(\d+)\]', pass_part)
                if m:
                    pos = int(m.group(1)) - 1
                    add = int(m.group(2))
                    new_pass_dict.append(
                        '%d' %
                        (max(ord(db_name[pos].lower()), 96) - 96 + add, ))
                else:
                    new_pass_dict.append(pass_part)
            if new_pass_dict:
                newpass = ''.join(new_pass_dict)

        for new_user_info in args.createusers.split(';'):
            new_user, groups = new_user_info.split(':')
            rc, new_userid = psql(
                args,
                """ insert into res_users (name, active, login, password, context_lang, company_id, view, menu_id) values
                ('%s', 't', '%s', '%s', 'en_MF', 1, 'simple', 1) returning id;"""
                % (new_user, new_user.lower(), newpass),
                db,
                silent=True)
            if rc != 0:
                return rc
            for new_group in groups.split(','):
                rc = psql(
                    args,
                    " insert into res_groups_users_rel (uid, gid) (select %s, id from res_groups where name='%s');"
                    % (new_userid, new_group), db)
                if rc != 0:
                    return rc

    # ok, delive finished with no problems
    return 0
Example #30
def load_dump_into(args, db, f, sz):
    tot = float(sz)
    if sz == 0:
        ufload.progress("Note: No progress percent available.")

    db2 = db + "_" + str(os.getpid())

    ufload.progress("Create database " + db2)
    tablespace = ""
    if args.db_tablespace:
        tablespace = 'TABLESPACE "%s"' % args.db_tablespace
    rc = psql(args, 'CREATE DATABASE \"%s\" %s' % (db2, tablespace))
    if rc != 0:
        return rc

    # From here out, we need a try block, so that we can drop
    # the temp db if anything went wrong
    try:
        ufload.progress("Restoring into %s" % db2)

        cmd = pg_restore(args)
        cmd.append('--no-acl')
        cmd.append('--no-owner')
        cmd.append('-d')
        cmd.append(db2)
        cmd.append('-n')
        cmd.append('public')
        cmd.append('-S')
        cmd.append(args.db_user)
        cmd.append('--disable-triggers')

        # Windows pg_restore gets confused when reading from a pipe,
        # so write to a temp file first.
        if sys.platform == "win32":
            tf = tempfile.NamedTemporaryFile(delete=False)
            if not args.show:

                n = 0
                next = 10
                for chunk in iter(lambda: f.read(1024 * 1024), b''):
                    tf.write(chunk)
                    n += len(chunk)
                    if tot != 0:
                        pct = n / tot * 100
                        if pct > next:
                            ufload.progress("Loading data: %d%%" % int(pct))
                            next = int(pct / 10) * 10 + 10

            tf.close()
            cmd.append(tf.name)

            ufload.progress("Starting restore. This will take some time.")
            try:
                rc = _run(args, cmd)
            except KeyboardInterrupt:
                raise dbException(1)

            # clean up the temp file
            try:
                os.unlink(tf.name)
            except OSError:
                pass
        else:
            # For non-Windows, feed the data in via pipe so that we have
            # some progress indication.
            if not args.show:
                p = subprocess.Popen(cmd,
                                     bufsize=1024 * 1024 * 10,
                                     stdin=subprocess.PIPE,
                                     stdout=sys.stdout,
                                     stderr=sys.stderr,
                                     env=pg_pass(args))

                n = 0
                next = 10
                for chunk in iter(lambda: f.read(8192), b''):
                    try:
                        p.stdin.write(chunk)
                    except IOError:
                        break
                    n += len(chunk)
                    if tot != 0:
                        pct = n / tot * 100
                        if pct > next:
                            ufload.progress("Restoring: %d%%" % int(pct))
                            next = int(pct / 10) * 10 + 10

                p.stdin.close()
                ufload.progress("Restoring: 100%")
                ufload.progress("Waiting for Postgres to finish restore")
                rc = p.wait()
            else:
                ufload.progress("Would run: " + str(cmd))
                rc = 0

        rcstr = "ok"
        if rc != 0:
            rcstr = "error %d" % rc
        ufload.progress("Restore finished with result code: %s" % rcstr)
        _checkrc(rc)

        #USELESS FOR SYNC SERVER Let's delete uninstalled versions
        #rc = psql(args, 'DELETE FROM sync_server_version WHERE state!=\'installed\'', db)
        #_checkrc(rc)

        _checkrc(delive(args, db2))

        ufload.progress("Drop database " + db)
        killCons(args, db)
        rc = psql(args, 'DROP DATABASE IF EXISTS \"%s\"' % db)
        _checkrc(rc)

        ufload.progress("Rename database %s to %s" % (db2, db))
        rc = psql(args, 'ALTER DATABASE \"%s\" RENAME TO \"%s\"' % (db2, db))
        _checkrc(rc)

        return 0
    except dbException as e:
        # something went wrong, so drop the temp table
        ufload.progress("Unexpected error %s" % sys.exc_info()[0])
        ufload.progress("Cleanup: dropping db %s" % db2)
        killCons(args, db2)
        psql(args, 'DROP DATABASE \"%s\"' % db2)
        return e.rc
    except:
        ufload.progress("Unexpected error %s" % sys.exc_info()[0])
        ufload.progress("Cleanup: dropping db %s" % db2)
        killCons(args, db2)
        psql(args, 'DROP DATABASE \"%s\"' % db2)
        return 1