def updateInstance(args, instance):
    update_modules = True
    i = 0
    sleep_time = 5
    max_time = 1800
    max_incrementation = (max_time/sleep_time)
    ufload.progress("Updating modules for instance {}".format(instance))
    while update_modules and i < max_incrementation:
        update_modules = False
        try:
            netrpc = ufload.db.connect_rpc(args, instance)
        except oerplib.error.RPCError as err:
            ufload._progress("error.RPCError: {0}".format(err[0]))
            # regex = r""".*Cannot check for updates: There is/are [0-9]+ revision\(s\) available."""
            # flags = re.S
            # if re.compile(regex, flags).match(err[0]) or err[0].endswith('Server is updating modules ...'):
            #     update_modules = True
            # elif err[0].endswith('ServerUpdate: Server is updating modules ...'):
            if err[0].endswith('ServerUpdate: Server is updating modules ...'):
                update_modules = True
            else:
                raise oerplib.error.RPCError(err)
        except socket.error as err:
            update_modules = True
        # Animate a small console spinner while waiting between connection attempts
        for j in range(sleep_time):
            sys.stdout.write(next(spinner))
            sys.stdout.flush()
            time.sleep(1)
            sys.stdout.write('\b')
        i += 1
        sys.stdout.write('\r')
    # Still updating after max_time: give up
    if i >= max_incrementation and update_modules:
        raise ValueError("Too long wait for updating modules on instance {}".format(instance))
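# `spinner` used above is assumed to be a module-level character cycle that
# animates the console while ufload waits; it is not defined in this section.
# A minimal sketch of such a definition (an assumption, not the original code):
#
#   import itertools
#   spinner = itertools.cycle(['|', '/', '-', '\\'])
#
# next(spinner) then yields '|', '/', '-', '\' in turn, and the '\b' written
# after each character erases it so the spinner appears to rotate in place.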
def get_hwid(args):
    if sys.platform == 'win32':
        import _winreg
        try:
            with _winreg.OpenKey(
                    _winreg.HKEY_LOCAL_MACHINE,
                    r"SYSTEM\ControlSet001\services\eventlog\Application\openerp-web-6.0",
                    0, _winreg.KEY_READ) as registry_key:
                hwid, regtype = _winreg.QueryValueEx(registry_key, "HardwareId")
                ufload.progress("Hardware id from registry key: %s" % hwid)
                return hwid
        except WindowsError as e:
            ufload._progress(e.message)
            return None
    else:
        # Follow the same algorithm that Unifield uses (see sync_client.py)
        mac = []
        for line in os.popen("/sbin/ifconfig"):
            if line.find('Ether') > -1:
                mac.append(line.split()[4])
        mac.sort()
        hw_hash = hashlib.md5(''.join(mac)).hexdigest()
        return hw_hash
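# The non-Windows branch of get_hwid() follows the recipe noted above: collect
# the MAC addresses reported by ifconfig, sort them, and md5 the concatenation.
# A standalone sketch with hypothetical MAC addresses, shown only to illustrate
# the hashing step:
#
#   import hashlib
#   mac = ['00:11:22:33:44:55', 'aa:bb:cc:dd:ee:ff']   # hypothetical values
#   mac.sort()
#   hw_hash = hashlib.md5(''.join(mac)).hexdigest()     # 32-char hex digest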
def _cmdRestore(args):
    # if args.sync:
    #     if not _required(args, [ 'syncuser', 'syncpw' ]):
    #         return 2

    if args.autosync is not None:
        if not _required(args, [ 'sync' ]):
            if not _required(args, [ 'synclight' ]):
                ufload.progress("Load sync server (-load-sync-server or -load-sync-server-no-update) argument is mandatory for auto-sync")
                return 2

    # if the parameter nopwreset is not defined, adminpw and userspw are mandatory.
    if not args.nopwreset:
        if args.adminpw is None or args.userspw is None:
            ufload.progress("-adminpw AND -userspw are mandatory if -nopwreset is not set")
            return 2

    if args.file is not None:
        rc, dbs = _fileRestore(args)
    elif args.dir is not None:
        rc, dbs = _dirRestore(args)
    else:
        rc, dbs = _multiRestore(args)
    if rc != 0:
        return rc

    ss = 'SYNC_SERVER_LOCAL'
    if args.ss is not None:
        ss = args.ss
    if args.db_prefix:
        ss = '%s_%s' % (args.db_prefix, ss)

    if args.sync or args.synclight:
        # Restore a sync server (LIGHT WITH MASTER)
        rc = _syncRestore(args, dbs, ss)

    if args.sync or args.synclight or args.autosync or args.ss is not None:
        # Update instances sync settings
        for db in dbs:
            ufload._progress("Connection settings for %s" % db)
            #Defines sync server connection settings on each instance
            ufload.db.sync_server_settings(args, ss, db)
            if args.sync or args.autosync or args.synclight or args.ss is not None:
                #Connects each instance to the sync server (and sets pwd)
                ufload.db.connect_instance_to_sync_server(args, ss, db)
        _syncLink(args, dbs, ss)

    return rc
def _cmdRestore(args):
    # if args.sync:
    #     if not _required(args, [ 'syncuser', 'syncpw' ]):
    #         return 2

    if args.autosync is not None:
        if not _required(args, ['sync']):
            ufload.progress(
                "Load sync server (-load-sync-server) argument is mandatory for auto-sync"
            )
            return 2

    if args.file is not None:
        rc, dbs = _fileRestore(args)
    elif args.dir is not None:
        rc, dbs = _dirRestore(args)
    else:
        rc, dbs = _multiRestore(args)
    if rc != 0:
        return rc

    ss = 'SYNC_SERVER_LOCAL'
    if args.ss:
        ss = args.ss

    if args.sync or args.synclight:
        # Restore a sync server (LIGHT WITH MASTER)
        rc = _syncRestore(args, dbs, ss)

    if args.sync is not None or args.synclight is not None or args.autosync is not None or args.ss is not None:
        # Update instances sync settings
        for db in dbs:
            ufload._progress("Connection settings for %s" % db)
            #Defines sync server connection settings on each instance
            ufload.db.sync_server_settings(args, ss, db)
            if args.sync or args.autosync or args.synclight:
                #Connects each instance to the sync server (and sets pwd)
                ufload.db.connect_instance_to_sync_server(args, ss, db)
        _syncLink(args, dbs, args.ss)

    return rc
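# `_required` is defined elsewhere in ufload; from its call sites above it
# appears to check that the named command-line arguments were supplied and to
# report any missing ones. A minimal sketch of that behaviour (an assumption,
# not the real helper):
#
#   def _required(args, names):
#       missing = [n for n in names if getattr(args, n, None) is None]
#       for n in missing:
#           ufload.progress("Argument -%s is required for this command." % n)
#       return len(missing) == 0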
def _cmdClean(args):
    nb = ufload.db.cleanDbs(args)
    if nb == 1:
        ufload._progress('One database has been deleted')
    elif nb > 1:
        ufload._progress('%s databases have been deleted' % nb)
    else:
        ufload._progress('No database to delete found')
    return 0
def _cmdUpgrade(args):
    summarize = {
        'initial_version': '',
        'last_version': '',
        'user_rights_updated': ''
    }

    #Install the patch on the sync server
    ss = 'SYNC_SERVER_LOCAL'
    if args.ss:
        ss = args.ss

    if args.patchcloud is not None:
        if not _required(args, ['adminuser', 'adminpw']):
            return 2
        #Connect to OD (cloud access)
        info = ufload.cloud.get_cloud_info(args, args.patchcloud)
        ufload.progress('site=%s - path=%s - dir=%s' % (info.get('site'), info.get('path'), info.get('dir')))
        dav = ufload.cloud.get_onedrive_connection(args)
        #Check for a zip file in the folder
        patches = ufload.cloud.list_patches(user=info.get('login'),
                                            pw=info.get('password'),
                                            where=info.get('dir'),
                                            dav=dav,
                                            url=info.get('url'),
                                            site=info.get('site'),
                                            path=info.get('path'))
        if len(patches) == 0:
            ufload.progress("No upgrade patch found.")
            return 1
        #Download the patches, oldest version first
        patches.sort(key=lambda s: map(int, re.split('\.|-|p', re.search('uf(.+?)\.patch\.zip', s[1], re.I).group(1))))
        i = 0
        for j in patches:
            filename = dav.download(j[2], j[1])
            #Set patch and version args
            args.patch = filename
            m = re.search('(.+?)\.patch\.zip', filename)
            if m:
                args.version = m.group(1)
            if ufload.db.installPatch(args, ss) == 0:
                i += 1
            else:
                summarize['initial_version'] = args.version
            summarize['last_version'] = args.version
            os.remove(filename)
        if i == 0:
            ufload.progress("No new patches found")
            if args.userrightscloud is None or not args.forcesync:
                return 0
        else:
            ufload.db.updateInstance(args, ss)
    else:
        if not _required(args, ['patch', 'version', 'adminuser', 'adminpw']):
            return 2
        if ufload.db.installPatch(args, ss) == -1:
            ufload.progress("No new patches found")
            if args.userrightscloud is None or not args.forcesync:
                return 0

    #List instances
    inst = []
    if args.i is not None:
        instances = [x for x in args.i]
    else:
        instances = ufload.db._allDbs(args)

    #Update hardware_id and entity names in the Sync Server
    _syncLink(args, instances, ss)

    update_src = True
    update_available = False
    #Upgrade Unifield
    for instance in instances:
        if instance and instance != ss:
            ufload._progress("Connecting instance %s to sync server %s" % (instance, ss))
            try:
                ufload.db.connect_instance_to_sync_server(args, ss, instance)
            except oerplib.error.RPCError as err:
                if err[0].endswith("OpenERP version doesn't match database version!"):
                    ufload.progress("A new version is present")
                    update_available = True
                else:
                    raise oerplib.error.RPCError(err)
            i = 0
            while update_src:
                try:
                    ufload.db.manual_sync(args, ss, instance)
                except oerplib.error.RPCError as err:
                    regex = r""".*Cannot check for updates: There is/are [0-9]+ revision\(s\) available."""
                    flags = re.S
                    if re.compile(regex, flags).match(err[0]):
                        update_available = True
                        break
                    elif err[0].endswith('Authentification Failed, please contact the support'):
                        if i >= 10:
                            raise oerplib.error.RPCError(err)
                        time.sleep(1)
                        i += 1
                        # retry the synchronisation
                        continue
                    else:
                        raise oerplib.error.RPCError(err)
                update_src = False
                break
            if not update_src:
                ufload.progress("No valid update found.")
                break
            if update_available:
                ufload.progress("Upgrading Unifield App")
                ufload.db.manual_upgrade(args, ss, instance)
                ufload.progress("Awaiting the restart of Unifield")
                starting_up = True
                i = 0
                sleep_time = 1
                max_time = 300
                max_incrementation = (max_time / sleep_time)
                sys.stdout.flush()
                while starting_up and i < max_incrementation:
                    sys.stdout.write(next(ufload.db.spinner))
                    sys.stdout.flush()
                    time.sleep(sleep_time)
                    starting_up = True
                    i += 1
                    try:
                        r = requests.get(
                            "http://127.0.0.1:8061/openerp/login?db=&user="******"No User Rights found.")
                        return 1

    patches.sort(key=lambda s: map(int, re.split('\.|-|p', re.search('User Rights v(.+?).zip', s[1], re.I).group(1))))
    urfilename = None
    for j in patches:
        urfilename = dav.download(j[2], j[1])
    if urfilename is not None:
        #Set patch and version args
        args.user_rights_zip = urfilename
        summarize['user_rights_updated'] = re.search('User Rights v(.+?).zip', urfilename, re.I).group(1)
        try:
            ufload.db.installUserRights(args, ss)
        except oerplib.error.RPCError as err:
            if err[0].endswith('exists on server'):
                ufload.progress(err[0].split("\n")[-1])
                summarize['user_rights_updated'] = ''
            else:
                raise oerplib.error.RPCError(err)
        os.remove(urfilename)

    if args.forcesync and (not args.userrightscloud or (args.userrightscloud and summarize['user_rights_updated'] != '')):
        if instance and instance != ss:
            for instance in instances:
                ufload._progress("Connecting instance %s to sync server %s" % (instance, ss))
                ufload.db.connect_instance_to_sync_server(args, ss, instance)
                ufload._progress("Synchronising instance %s with sync server %s" % (instance, ss))
                ufload.db.manual_sync(args, ss, instance)

    if (args.autosync or args.silentupgrade) and update_src:
        for instance in instances:
            if instance:
                ufload._progress("Connecting instance %s to sync server %s" % (instance, ss))
                ufload.db.connect_instance_to_sync_server(args, ss, instance)
                #ufload._progress("Update instance %s" % instance)
                #ufload.db.updateInstance(instance)
                if args.autosync:
                    #activate auto-sync (now + 1 hour)
                    ufload.db.activate_autosync(args, instance, ss)
                if args.silentupgrade:
                    #activate silent upgrade
                    ufload.db.activate_silentupgrade(args, instance)

    ufload.progress(" *** Summary ***")
    ufload.progress(" * Initial version installed: {}".format(summarize['initial_version']))
    ufload.progress(" * Last version installed: {}".format(summarize['last_version']))
    if args.userrightscloud is not None:
        ufload.progress(" * User Rights updated : {}".format(summarize['user_rights_updated'] if summarize['user_rights_updated'] else 'None'))
    return 0
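# The patch and User Rights ordering in _cmdUpgrade() relies on splitting a
# version string on '.', '-' and 'p' and comparing the pieces numerically, so
# that e.g. a hypothetical "12.1-3p10" sorts after "12.1-3p2". A standalone
# sketch of the same idea (Python 2, where map() returns a list usable as a
# sort key):
#
#   import re
#   version_key = lambda v: map(int, re.split('\.|-|p', v))
#   sorted(['12.1-3p10', '12.1-3p2'], key=version_key)
#   # -> ['12.1-3p2', '12.1-3p10']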
def _multiRestore(args):
    if not _required(args, ['user', 'pw']):
        ufload.progress('With no -file or -dir argument, cloud credentials are mandatory.')
        return 2, None

    if args.i is None:
        if not _required(args, ['oc']):
            ufload.progress('With no -file or -dir argument, you must use -i or -oc.')
            return 2, None
        ufload.progress("Multiple Instance restore for all instances in %s" % args.oc)
    else:
        if not args.oc:
            ufload.progress('Argument -oc not provided, please note that ufload will look for an OC pattern in the -i arguments (you might want to avoid partial substrings)')
        ufload.progress("Multiple Instance restore for instances matching: %s" % " or ".join(args.i))

    if args.workingdir:
        try:
            os.mkdir(args.workingdir)
        except:
            pass
        os.chdir(args.workingdir)

    #Create a temp directory to unzip files
    try:
        os.mkdir('ufload_temp')
    except:
        pass
    #Change working directory
    os.chdir('ufload_temp')

    #Cloud access
    info = ufload.cloud.get_cloud_info(args)
    ufload.progress('site=%s - path=%s - dir=%s' % (info.get('site'), info.get('path'), info.get('dir')))
    dav = ufload.cloud.get_onedrive_connection(args)

    if not args.oc:
        #foreach -i add the dir
        dirs = []
        instances = {}
        baseurl = dav.baseurl.rstrip('/')
        for substr in args.i:
            if args.exclude is None or not ufload.cloud._match_instance_name(args.exclude, substr):
                dirs.append(ufload.cloud.instance_to_dir(substr))
        #Remove duplicates
        dirs = list(set(dirs))
        #Get the list for every required OC
        for dir in dirs:
            dav.change_oc(baseurl, dir)
            instances.update(ufload.cloud.list_files(user=info.get('login'),
                                                     pw=info.get('password'),
                                                     where=dir + args.cloud_path,
                                                     instances=args.i,
                                                     dav=dav,
                                                     url=info.get('url'),
                                                     site=dir,
                                                     path=info.get('path')))
    else:
        instances = ufload.cloud.list_files(user=info.get('login'),
                                            pw=info.get('password'),
                                            where=info.get('dir'),
                                            instances=args.i,
                                            dav=dav,
                                            url=info.get('url'),
                                            site=info.get('site'),
                                            path=info.get('path'))

    ufload.progress("Instances to be restored: %s" % ", ".join(instances.keys()))

    dbs = []
    pattern = re.compile('.*-[A-Z]{1}[a-z]{2}\.zip$')
    for i in instances:
        if args.exclude is not None and ufload.cloud._match_instance_name(args.exclude, i):
            ufload._progress("%s matches -exclude param %s and will not be processed" % (i, args.exclude))
            continue
        files_for_instance = instances[i]
        for j in files_for_instance:
            #If filename doesn't match UniField auto-upload filename pattern, go to next file
            if not pattern.match(j[1]):
                continue
            ufload.progress("Trying file %s" % j[1])
            #If -oc is not known, change the connection settings according to the current instance
            if not args.oc:
                if i.endswith('_OCA'):
                    dav.change_oc(baseurl, 'OCA')
                elif i.startswith('OCB'):
                    dav.change_oc(baseurl, 'OCB')
                elif i.startswith('OCG_'):
                    dav.change_oc(baseurl, 'OCG')
                elif i.startswith('OCP_'):
                    dav.change_oc(baseurl, 'OCP')
            filename = dav.download(j[0], j[1])
            filesize = os.path.getsize(filename) / (1024 * 1024)
            ufload.progress("File size: %s MB" % filesize)
            n = ufload.cloud.peek_inside_local_file(j[0], filename)
            '''n = ufload.cloud.peek_inside_file(j[0], j[1],
                                                 user=args.user,
                                                 pw=args.pw,
                                                 dav=dav,
                                                 where=_ocToDir(args.oc))
            '''
            if n is None:
                os.unlink(j[1])
                # no dump inside of zip, try the next one
                continue
            db = _file_to_db(args, str(n))
            if ufload.db.exists(args, db):
                ufload.progress("Database %s already exists." % db)
                os.unlink(j[1])
                break
            else:
                ufload.progress("Database %s does not exist, restoring." % db)

            '''f, sz = ufload.cloud.openDumpInZip(j[0], j[1],
                                                  user=args.user,
                                                  pw=args.pw,
                                                  where=_ocToDir(args.oc))
            '''
            fname, sz = ufload.cloud.openDumpInZip(j[1])
            if fname is None:
                os.unlink(j[1])
                continue
            db = _file_to_db(args, fname)
            if db is None:
                ufload.progress("Bad filename %s. Skipping." % fname)
                try:
                    os.unlink(j[1])
                except:
                    pass
                continue
            rc = ufload.db.load_zip_into(args, db, j[1], sz)
            if rc == 0:
                dbs.append(db)
                if not args.noclean:
                    rc = ufload.db.clean(args, db)
                if args.notify:
                    subprocess.call([args.notify, db])
                try:
                    os.unlink(j[1])
                except:
                    pass
                # We got a good load, so go to the next instance.
                break
            try:
                os.unlink(j[1])
            except Exception as ex:
                pass

    if args.ss is not None and args.sync is None and args.synclight is None:
        _syncLink(args, dbs, args.ss)

    try:
        #Change directory
        os.chdir('..')
        #Remove temporary directory (and whatever is in it)
        shutil.rmtree('ufload_temp', True)
    except:
        pass

    return 0, dbs
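# The filename filter in _multiRestore() only accepts names ending in a dash,
# a capitalised three-letter group and ".zip" (e.g. "-Mon.zip"), which is how
# the UniField auto-upload names its dumps. A standalone illustration with
# hypothetical filenames:
#
#   import re
#   pattern = re.compile('.*-[A-Z]{1}[a-z]{2}\.zip$')
#   bool(pattern.match('OCG_XX1_PROJECT-backup-Mon.zip'))   # True (hypothetical name)
#   bool(pattern.match('manual_dump.zip'))                  # False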