def setUp(self):
    """
    Prepare an isolated BitDust home in /tmp/.bitdust_tmp with a known
    private key and local identity, plus the folders the test expects.
    """
    # Start from a clean slate; best-effort, the directory may not exist yet.
    try:
        bpio.rmdir_recursive('/tmp/.bitdust_tmp')
    except Exception:
        pass
    lg.set_debug_level(30)
    settings.init(base_dir='/tmp/.bitdust_tmp')
    # exist_ok=True replaces the old bare try/except around makedirs,
    # so real failures (permissions, bad path) are no longer swallowed.
    os.makedirs('/tmp/.bitdust_tmp/metadata', exist_ok=True)
    automat.OpenLogFile('/tmp/.bitdust_tmp/logs/automats.log')
    self.my_current_key = None
    # Context managers guarantee the files are closed even if a write fails.
    with open('/tmp/_some_priv_key', 'w') as fout:
        fout.write(_some_priv_key)
    with open(settings.LocalIdentityFilename(), 'w') as fout:
        fout.write(_some_identity_xml)
    self.assertTrue(key.LoadMyKey(keyfilename='/tmp/_some_priv_key'))
    self.assertTrue(my_id.loadLocalIdentity())
    my_id.init()
    os.makedirs('/tmp/.bitdust_tmp/logs', exist_ok=True)
    local_fs.WriteTextFile('/tmp/.bitdust_tmp/logs/parallelp.log', '')
    tmpfile.init(temp_dir_path='/tmp/.bitdust_tmp/temp/')
    # Intentionally NOT exist_ok: the backup folder must not pre-exist.
    os.makedirs('/tmp/.bitdust_tmp/backups/[email protected]_8084/1/F1234')
    try:
        bpio.rmdir_recursive('/tmp/_some_folder', ignore_errors=True)
    except Exception:
        pass
    os.makedirs('/tmp/_some_folder')
def clear_brokers(customer_id):
    """
    Forget every known message broker of the given customer and wipe the
    customer's brokers folder from disk, if present.
    """
    # Drop the in-memory broker records for this customer first.
    known_brokers(customer_id, erase_brokers=True)
    brokers_root = os.path.join(settings.ServiceDir('service_private_groups'), 'brokers')
    customer_folder = os.path.join(brokers_root, customer_id)
    if os.path.isdir(customer_folder):
        bpio.rmdir_recursive(customer_folder, ignore_errors=True)
def setUp(self):
    """
    Prepare an isolated BitDust home in /tmp/.bitdust_tmp with my own
    identity loaded and Bob's identity placed into the identity cache.
    """
    # Best-effort cleanup of a previous run.
    try:
        bpio.rmdir_recursive('/tmp/.bitdust_tmp')
    except Exception:
        pass
    lg.set_debug_level(30)
    settings.init(base_dir='/tmp/.bitdust_tmp')
    self.my_current_key = None
    # exist_ok=True replaces the old bare try/except around makedirs.
    os.makedirs('/tmp/.bitdust_tmp/metadata/', exist_ok=True)
    os.makedirs('/tmp/.bitdust_tmp/identitycache/', exist_ok=True)
    # Context managers guarantee the files are closed even on write errors.
    with open('/tmp/_some_priv_key', 'w') as fout:
        fout.write(_some_priv_key)
    with open(settings.LocalIdentityFilename(), 'w') as fout:
        fout.write(_some_identity_xml)
    self.assertTrue(key.LoadMyKey(keyfilename='/tmp/_some_priv_key'))
    self.assertTrue(my_id.loadLocalIdentity())
    self.bob_ident = identity.identity(xmlsrc=_another_identity_xml)
    identitycache.UpdateAfterChecking(idurl=self.bob_ident.getIDURL(), xml_src=_another_identity_xml)
def tearDown(self):
    """Undo setUp(): release in-memory state, then remove on-disk fixtures."""
    # Shutdown calls run in reverse order of initialization — keep this order.
    backup_fs.shutdown()
    key.ForgetMyKey()
    my_id.forgetLocalIdentity()
    settings.shutdown()
    # Remove the fixture files/folders created by setUp().
    os.remove('/tmp/_some_priv_key')
    bpio.rmdir_recursive('/tmp/.bitdust_tmp')
def tearDown(self):
    """Undo setUp(): close logs and temp-file machinery, then wipe fixtures."""
    # Shutdown calls run in reverse order of initialization — keep this order.
    automat.CloseLogFile()
    tmpfile.shutdown()
    key.ForgetMyKey()
    my_id.forgetLocalIdentity()
    settings.shutdown()
    # Remove fixtures created during setUp() and the test run.
    os.remove('/tmp/_some_priv_key')
    bpio.rmdir_recursive('/tmp/.bitdust_tmp')
    bpio.rmdir_recursive('/tmp/_some_folder')
    # NOTE(review): /tmp/random_file is presumably created by the test body —
    # confirm; os.remove will raise if it is missing.
    os.remove('/tmp/random_file')
def setUp(self):
    """Prepare a clean BitDust home in /tmp/.bitdust_tmp with a logs folder."""
    # Best-effort cleanup of a previous run.
    try:
        bpio.rmdir_recursive('/tmp/.bitdust_tmp')
    except Exception:
        pass
    settings.init(base_dir='/tmp/.bitdust_tmp')
    lg.set_debug_level(30)
    # exist_ok=True replaces the old bare try/except around makedirs.
    os.makedirs('/tmp/.bitdust_tmp/logs', exist_ok=True)
def clone(callback=None):
    """
    Clone a fresh shallow copy of the go-ethereum repository into the
    "ethereum" sub-folder of the application base dir.

    Any previously cloned copy is removed first.  ``callback`` is passed
    through to ``git_proc.run`` as ``callback_func`` and fired when the
    git command finishes.
    """
    ethereum_location = os.path.join(settings.BaseDir(), 'ethereum')
    # exist_ok=True replaces the isdir() check + makedirs() pair.
    os.makedirs(ethereum_location, exist_ok=True)
    geth_location = os.path.join(ethereum_location, 'go-ethereum')
    # Remove any stale clone so git gets an empty target directory.
    if os.path.exists(geth_location):
        bpio.rmdir_recursive(geth_location)
    git_proc.run(
        ['clone', '--verbose', '--depth', '1', 'https://github.com/ethereum/go-ethereum', geth_location],
        base_dir=ethereum_location,
        env=os.environ,
        callback_func=callback,
    )
def erase(name, filename, why='no reason'):
    """
    Remove a single temporary file and forget it in the registry.

    However you can remove not needed file immediately, this is a good way also.
    But outside of this module you better use method ``throw_out``.
    """
    global _FilesDict
    # Membership test on the dict itself; no need to materialize the keys.
    if name in _FilesDict:
        try:
            # pop() with a default normally cannot raise; the guard stays
            # purely defensive in case the registry holds a non-dict value.
            _FilesDict[name].pop(filename, '')
        except Exception:
            lg.warn(
                'we do not know about file [%s] in sub folder %s, we tried because %s' % (filename, name, why))
    else:
        lg.warn('we do not know sub folder: %s, we tried because %s' % (name, why))
    if not os.path.exists(filename):
        lg.warn('[%s] not exist' % filename)
        return
    if os.path.isfile(filename):
        if not os.access(filename, os.W_OK):
            lg.warn('[%s] no write permissions' % filename)
            return
        try:
            os.remove(filename)
            if _Debug:
                lg.out(_DebugLevel, 'tmpfile.erase [%s] : "%s"' % (filename, why))
        except OSError:
            lg.out(
                2, 'tmpfile.erase ERROR can not remove [%s], we tried because %s' % (filename, why))
            # exc()
    elif os.path.isdir(filename):
        bpio.rmdir_recursive(filename, ignore_errors=True)
        if _Debug:
            lg.out(_DebugLevel, 'tmpfile.erase recursive [%s] : "%s"' % (filename, why))
    else:
        # Path exists but is neither a regular file nor a directory.
        raise Exception('[%s] not exist' % filename)
def setUp(self):
    """
    Prepare a clean BitDust home in /tmp/.bitdust_tmp and initialize the
    identity URL layer with a throw-away identity-history directory.
    """
    # Best-effort cleanup of a previous run.
    try:
        bpio.rmdir_recursive('/tmp/.bitdust_tmp')
    except Exception:
        pass
    lg.set_debug_level(30)
    settings.init(base_dir='/tmp/.bitdust_tmp')
    # Point the identity history at a unique temp dir before init().
    id_url._IdentityHistoryDir = tempfile.mkdtemp()
    id_url.init()
    # exist_ok=True replaces the old bare try/except around makedirs.
    os.makedirs('/tmp/.bitdust_tmp/identitycache/', exist_ok=True)
    os.makedirs('/tmp/.bitdust_tmp/identityhistory/', exist_ok=True)
def setUp(self):
    """
    Prepare a clean BitDust home and apply the child-processes-enabled
    flag from the test class attribute to the rebuilding service config.
    """
    # Best-effort cleanup of a previous run.
    try:
        bpio.rmdir_recursive('/tmp/.bitdust_tmp')
    except Exception:
        pass
    settings.init(base_dir='/tmp/.bitdust_tmp')
    lg.set_debug_level(30)
    # exist_ok=True replaces the old bare try/except around makedirs.
    os.makedirs('/tmp/.bitdust_tmp/logs', exist_ok=True)
    local_fs.WriteTextFile('/tmp/.bitdust_tmp/logs/parallelp.log', '')
    # Single call replaces the duplicated if/else setBool(True/False) pair.
    config.conf().setBool(
        'services/rebuilding/child-processes-enabled',
        bool(self.child_processes_enabled))
def setUp(self):
    """
    Prepare a clean BitDust home in /tmp/.bitdust_tmp with a known
    private key and local identity loaded.
    """
    # Best-effort cleanup of a previous run.
    try:
        bpio.rmdir_recursive('/tmp/.bitdust_tmp')
    except Exception:
        pass
    lg.set_debug_level(30)
    settings.init(base_dir='/tmp/.bitdust_tmp')
    self.my_current_key = None
    # exist_ok=True replaces the old bare try/except around makedirs.
    os.makedirs('/tmp/.bitdust_tmp/metadata/', exist_ok=True)
    # Context managers guarantee the files are closed even on write errors.
    with open('/tmp/_some_priv_key', 'w') as fout:
        fout.write(_some_priv_key)
    with open(settings.LocalIdentityFilename(), 'w') as fout:
        fout.write(_some_identity_xml)
    self.assertTrue(key.LoadMyKey(keyfilename='/tmp/_some_priv_key'))
    self.assertTrue(my_id.loadLocalIdentity())
def test_signed_key(self):
    """
    Register a key, sign it with the local master key and verify that the
    exported key info carries a valid signature.
    """
    # Best-effort cleanup of a previous run.
    try:
        bpio.rmdir_recursive('/tmp/.bitdust_test_signed_key')
    except Exception:
        pass
    lg.set_debug_level(30)
    settings.init(base_dir='/tmp/.bitdust_test_signed_key')
    self.my_current_key = None
    # exist_ok=True replaces the old bare try/except around makedirs.
    os.makedirs('/tmp/.bitdust_test_signed_key/metadata/', exist_ok=True)
    # Context managers guarantee the files are closed even on write errors.
    with open('/tmp/_some_priv_key', 'w') as fout:
        fout.write(_some_priv_key)
    with open(settings.LocalIdentityFilename(), 'w') as fout:
        fout.write(_some_identity_xml)
    self.assertTrue(key.LoadMyKey(keyfilename='/tmp/_some_priv_key'))
    self.assertTrue(my_id.loadLocalIdentity())
    key_id = '[email protected]_8084'
    # Make sure a stale copy of the key does not interfere.
    my_keys.erase_key(key_id, keys_folder='/tmp/')
    my_keys.register_key(key_id, _sample_private_key, keys_folder='/tmp/')
    is_valid = my_keys.validate_key(my_keys.key_obj(key_id))
    self.assertTrue(is_valid)
    my_keys.sign_key(key_id)
    signed_key_info = my_keys.get_key_info(key_id, include_private=True, include_signature=True)
    self.assertTrue(my_keys.verify_key_info_signature(signed_key_info))
    # Manual teardown of everything initialized above.
    key.ForgetMyKey()
    my_id.forgetLocalIdentity()
    settings.shutdown()
    os.remove('/tmp/_some_priv_key')
    bpio.rmdir_recursive('/tmp/.bitdust_test_signed_key')
def on_identity_url_changed(evt):
    """
    React to an identity URL rotation: rename on-disk group/broker records
    and in-memory registry entries from the old IDURL to the new one, then
    reconnect group members whose connected brokers were rotated.
    """
    from access import group_member
    service_dir = settings.ServiceDir('service_private_groups')
    groups_dir = os.path.join(service_dir, 'groups')
    brokers_dir = os.path.join(service_dir, 'brokers')
    old_idurl = id_url.field(evt.data['old_idurl'])
    new_idurl = id_url.field(evt.data['new_idurl'])
    # Snapshot the keys: active_groups() is mutated inside the loop.
    active_group_keys = list(active_groups())
    to_be_reconnected = []
    for group_key_id in active_group_keys:
        if not group_key_id:
            continue
        group_creator_idurl = global_id.glob2idurl(group_key_id)
        if id_url.is_the_same(group_creator_idurl, old_idurl):
            # The group creator's IDURL rotated: rename the group file and
            # move the in-memory record to the latest key id.
            old_group_path = os.path.join(groups_dir, group_key_id)
            latest_group_key_id = my_keys.latest_key_id(group_key_id)
            latest_group_path = os.path.join(groups_dir, latest_group_key_id)
            lg.info('going to rename rotated group file: %r -> %r' % (old_group_path, latest_group_path, ))
            if os.path.isfile(old_group_path):
                try:
                    os.rename(old_group_path, latest_group_path)
                except:
                    lg.exc()
                    continue
            else:
                lg.warn('key file %r was not found, key was not renamed' % old_group_path)
            active_groups()[latest_group_key_id] = active_groups().pop(group_key_id)
            # NOTE: "memeber" typo is in the callee's name, defined elsewhere.
            group_member.rotate_active_group_memeber(group_key_id, latest_group_key_id)
        # Regardless of creator rotation, check if one of this group's
        # connected brokers used the rotated IDURL.
        gm = group_member.get_active_group_member(group_key_id)
        if gm and gm.connected_brokers and id_url.is_in(old_idurl, gm.connected_brokers.values()):
            lg.info('connected broker %r IDURL is rotated, going to reconnect %r' % (old_idurl, gm, ))
            if group_key_id not in to_be_reconnected:
                to_be_reconnected.append(group_key_id)
    # Snapshot the keys: known_brokers() is mutated inside the loop.
    known_customers = list(known_brokers().keys())
    for customer_id in known_customers:
        latest_customer_id = global_id.idurl2glob(new_idurl)
        customer_idurl = global_id.glob2idurl(customer_id)
        if id_url.is_the_same(customer_idurl, old_idurl):
            # The customer's IDURL rotated: move the on-disk broker folder
            # and the in-memory record to the latest global id.
            latest_customer_dir = os.path.join(brokers_dir, latest_customer_id)
            lg.info('going to rename rotated customer id: %r -> %r' % (customer_id, latest_customer_id, ))
            old_customer_dir = os.path.join(brokers_dir, customer_id)
            if os.path.isdir(old_customer_dir):
                try:
                    bpio.move_dir_recursive(old_customer_dir, latest_customer_dir)
                    bpio.rmdir_recursive(old_customer_dir)
                except:
                    lg.exc()
                    continue
            known_brokers()[latest_customer_id] = known_brokers().pop(customer_id)
            # Also rename any broker record of that customer that itself
            # pointed at the rotated IDURL.
            for broker_pos, broker_id in enumerate(known_brokers(latest_customer_id)):
                if not broker_id:
                    continue
                broker_idurl = global_id.glob2idurl(broker_id)
                if broker_idurl == old_idurl:
                    latest_broker_id = global_id.idurl2glob(new_idurl)
                    latest_broker_path = os.path.join(latest_customer_dir, latest_broker_id)
                    lg.info('going to rename rotated broker id: %r -> %r' % (broker_id, latest_broker_id, ))
                    old_broker_path = os.path.join(latest_customer_dir, broker_id)
                    if os.path.isfile(old_broker_path):
                        try:
                            os.rename(old_broker_path, latest_broker_path)
                        except:
                            lg.exc()
                            continue
                    if latest_broker_id in known_brokers(latest_customer_id):
                        lg.warn('broker %r already exist' % latest_broker_id)
                        continue
                    known_brokers()[latest_customer_id][broker_pos] = latest_broker_id
    if _Debug:
        lg.args(_DebugLevel, to_be_reconnected=to_be_reconnected)
    # Finally trigger a reconnect on every group member affected above.
    for group_key_id in to_be_reconnected:
        gm = group_member.get_active_group_member(group_key_id)
        if gm:
            gm.automat('reconnect')
def load_groups():
    """
    Load active group and known broker records from disk into the in-memory
    registries, renaming any records whose key/global ids were rotated.
    """
    loaded_brokers = 0
    loaded_groups = 0
    service_dir = settings.ServiceDir('service_private_groups')
    groups_dir = os.path.join(service_dir, 'groups')
    if not os.path.isdir(groups_dir):
        bpio._dirs_make(groups_dir)
    brokers_dir = os.path.join(service_dir, 'brokers')
    if not os.path.isdir(brokers_dir):
        bpio._dirs_make(brokers_dir)
    for group_key_id in os.listdir(groups_dir):
        latest_group_key_id = my_keys.latest_key_id(group_key_id)
        latest_group_path = os.path.join(groups_dir, latest_group_key_id)
        if latest_group_key_id != group_key_id:
            # On-disk file still uses a rotated (stale) key id — rename it.
            lg.info('going to rename rotated group key: %r -> %r' % (group_key_id, latest_group_key_id, ))
            old_group_path = os.path.join(groups_dir, group_key_id)
            try:
                os.rename(old_group_path, latest_group_path)
            except:
                lg.exc()
                continue
        latest_group_info = jsn.loads_text(local_fs.ReadTextFile(latest_group_path))
        if not latest_group_info:
            lg.err('was not able to load group info from %r' % latest_group_path)
            continue
        active_groups()[latest_group_key_id] = latest_group_info
        loaded_groups += 1
    for customer_id in os.listdir(brokers_dir):
        latest_customer_id = global_id.latest_glob_id(customer_id)
        latest_customer_dir = os.path.join(brokers_dir, latest_customer_id)
        if latest_customer_id != customer_id:
            # Customer folder still uses a rotated global id — move it over.
            lg.info('going to rename rotated customer id: %r -> %r' % (customer_id, latest_customer_id, ))
            old_customer_dir = os.path.join(brokers_dir, customer_id)
            try:
                bpio.move_dir_recursive(old_customer_dir, latest_customer_dir)
                bpio.rmdir_recursive(old_customer_dir)
            except:
                lg.exc()
                continue
        for broker_id in os.listdir(latest_customer_dir):
            # Ensure the registry slot for this customer exists.
            if latest_customer_id not in known_brokers():
                known_brokers(latest_customer_id)
            latest_broker_id = global_id.latest_glob_id(broker_id)
            latest_broker_path = os.path.join(latest_customer_dir, latest_broker_id)
            if latest_broker_id != broker_id:
                # Broker file still uses a rotated global id — rename it.
                lg.info('going to rename rotated broker id: %r -> %r' % (broker_id, latest_broker_id, ))
                old_broker_path = os.path.join(latest_customer_dir, broker_id)
                try:
                    os.rename(old_broker_path, latest_broker_path)
                except:
                    lg.exc()
                    continue
            latest_broker_info = jsn.loads_text(local_fs.ReadTextFile(latest_broker_path))
            if not latest_broker_info:
                lg.err('was not able to load broker info from %r' % latest_broker_path)
                continue
            existing_broker_id = known_brokers(latest_customer_id)[int(latest_broker_info['position'])]
            if existing_broker_id:
                # Position already taken: treat this record as a duplicate
                # and erase its file, keeping the one loaded first.
                if os.path.isfile(latest_broker_path):
                    lg.err('found duplicated broker for customer %r on position %d, erasing file %r' % (
                        latest_customer_id, int(latest_broker_info['position']), latest_broker_path, ))
                    try:
                        os.remove(latest_broker_path)
                    except:
                        lg.exc()
                continue
            known_brokers()[latest_customer_id][int(latest_broker_info['position'])] = latest_broker_id
            loaded_brokers += 1
    if _Debug:
        lg.args(_DebugLevel, loaded_groups=loaded_groups, loaded_brokers=loaded_brokers)
def tearDown(self):
    """Release settings state, then wipe the temporary application home."""
    settings.shutdown()
    bpio.rmdir_recursive('/tmp/.bitdust_tmp')
def doRemoveUnusedFiles(self, *args, **kwargs):
    """
    Action method.

    Erase local copies of backup data that are no longer needed: whole
    backup folders absent from the remote files index, and individual
    blocks already confirmed on enough suppliers.  Bails out early when
    the user wants local copies kept or the supplier team is unstable.
    """
    if not list_files_orator.is_synchronized():
        # always make sure we have a very fresh info about remote files before take any actions
        return
    # we want to remove files for this block
    # because we only need them during rebuilding
    if settings.getBackupsKeepLocalCopies() is True:
        # if user set this in settings - he want to keep the local files
        return
    # ... user do not want to keep local backups
    if settings.getGeneralWaitSuppliers() is True:
        from customer import fire_hire
        # but he want to be sure - all suppliers are green for a long time
        if len(online_status.listOfflineSuppliers()) > 0 or (
                time.time() - fire_hire.GetLastFireTime() < 24 * 60 * 60):
            # some people are not there or we do not have stable team yet
            # do not remove the files because we need it to rebuild
            return
    count = 0
    from storage import backup_matrix
    from storage import restore_monitor
    from storage import backup_rebuilder
    if _Debug:
        lg.out(_DebugLevel, 'data_sender.doRemoveUnusedFiles')
    for backupID in misc.sorted_backup_ids(
            list(backup_matrix.local_files().keys())):
        if restore_monitor.IsWorking(backupID):
            # A restore is reading these files right now.
            if _Debug:
                lg.out(_DebugLevel, '        %s : SKIP, because restoring' % backupID)
            continue
        if backup_rebuilder.IsBackupNeedsWork(backupID):
            # Rebuilder still needs the local pieces of this backup.
            if _Debug:
                lg.out(
                    _DebugLevel,
                    '        %s : SKIP, because needs rebuilding' % backupID)
            continue
        if not backup_rebuilder.ReadStoppedFlag():
            if backup_rebuilder.A().currentBackupID is not None:
                if backup_rebuilder.A().currentBackupID == backupID:
                    # Rebuilder is actively working on exactly this backup.
                    if _Debug:
                        lg.out(
                            _DebugLevel,
                            '        %s : SKIP, because rebuilding is in process' % backupID)
                    continue
        if backupID not in backup_matrix.remote_files():
            # Not present remotely at all — erase the whole local folder.
            if _Debug:
                lg.out(
                    _DebugLevel,
                    '        going to erase %s because not found in remote files' % backupID)
            customer, pathID, version = packetid.SplitBackupID(backupID)
            dirpath = os.path.join(settings.getLocalBackupsDir(), customer, pathID, version)
            if os.path.isdir(dirpath):
                try:
                    count += bpio.rmdir_recursive(dirpath, ignore_errors=True)
                except:
                    lg.exc()
            continue
        # Backup exists remotely: remove only the blocks confirmed safe.
        packets = backup_matrix.ScanBlocksToRemove(
            backupID, check_all_suppliers=settings.getGeneralWaitSuppliers())
        for packetID in packets:
            customer, pathID = packetid.SplitPacketID(packetID)
            filename = os.path.join(settings.getLocalBackupsDir(), customer, pathID)
            if os.path.isfile(filename):
                try:
                    os.remove(filename)
                except:
                    lg.exc()
                    continue
                count += 1
    if _Debug:
        lg.out(_DebugLevel, '    %d files were removed' % count)
    backup_matrix.ReadLocalFiles()