def snapshot(self, all=False):
    """Snapshot this instance's disks.

    By default only the first (root) block device is captured; pass
    all=True to capture every eligible device. Devices which are missing
    on disk, flagged with snapshot_ignores, or not qcow2 are skipped.

    Returns the uuid of the newly created snapshot.
    """
    devices = self.db_entry['block_devices']['devices']
    if not all:
        devices = [devices[0]]

    snapshot_uuid = str(uuid.uuid4())
    snappath = os.path.join(self.snapshot_path, snapshot_uuid)
    if not os.path.exists(snappath):
        logutil.debug([self], 'Creating snapshot storage at %s' % snappath)
        os.makedirs(snappath)
        # Drop a placeholder index so the snapshot tree isn't browsable.
        with open(os.path.join(self.snapshot_path, 'index.html'), 'w') as f:
            f.write('<html></html>')

    for device in devices:
        # Skip anything we can't, or were told not to, snapshot.
        if (not os.path.exists(device['path'])
                or device['snapshot_ignores']
                or device['type'] != 'qcow2'):
            continue

        with util.RecordedOperation('snapshot %s' % device['device'], self):
            self._snapshot_device(
                device['path'], os.path.join(snappath, device['device']))
            db.create_snapshot(snapshot_uuid, device['device'],
                               self.db_entry['uuid'], time.time())

    return snapshot_uuid
def log_request_info():
    """Log the headers and body of the current flask request at debug level.

    Intended to be wired up as a flask before_request hook; reads the
    in-flight request from the flask request context.
    """
    # isEnabledFor() is the canonical enablement check. The previous
    # equality test (LOG.level == logging.DEBUG) missed the case where the
    # level is NOTSET and DEBUG is inherited from a parent logger.
    if LOG.isEnabledFor(logging.DEBUG):
        output = 'API request headers:\n'
        for header, value in flask.request.headers:
            output += ' %s: %s\n' % (header, value)
        output += 'API request body: %s' % flask.request.get_data()

        logutil.debug(None, output)
def _get_cache_path():
    """Return the path of the image cache directory, creating it on first use."""
    storage = config.parsed.get('STORAGE_PATH')
    cache_dir = os.path.join(storage, 'image_cache')
    if not os.path.exists(cache_dir):
        # First use on this node -- make the directory and record that.
        logutil.debug(None, 'Creating image cache at %s' % cache_dir)
        os.makedirs(cache_dir)
    return cache_dir
def ensure_mesh(self):
    """Reconcile this network's vxlan mesh membership.

    Computes the set of node IPs which should be in the mesh (the network
    node plus every hypervisor hosting an instance on this network),
    compares that with what the mesh currently reports, and adds or
    removes elements to converge. Changes are recorded as events.
    """
    with db.get_lock('network', None, self.uuid, ttl=120):
        removed = []
        added = []

        # Unique instance uuids attached to this network, first-seen order.
        instance_uuids = list(dict.fromkeys(
            iface['instance_uuid']
            for iface in db.get_network_interfaces(self.uuid)))

        # The hypervisor fqdns those instances are placed on.
        node_fqdns = []
        for instance_uuid in instance_uuids:
            inst = db.get_instance(instance_uuid)
            if not inst:
                continue
            if not inst['node']:
                continue
            if inst['node'] not in node_fqdns:
                node_fqdns.append(inst['node'])

        # NOTE(mikal): why not use DNS here? Well, DNS might be outside
        # the control of the deployer if we're running in a public cloud
        # as an overlay cloud...
        node_ips = [config.parsed.get('NETWORK_NODE_IP')]
        for fqdn in node_fqdns:
            ip = db.get_node(fqdn)['ip']
            if ip not in node_ips:
                node_ips.append(ip)

        discovered = list(self.discover_mesh())
        logutil.debug([self], 'Discovered mesh elements %s' % discovered)

        # Anything discovered that we expected is accounted for; anything
        # discovered that we did not expect gets evicted from the mesh.
        for element in discovered:
            if element in node_ips:
                node_ips.remove(element)
            else:
                self._remove_mesh_element(element)
                removed.append(element)

        # Whatever expected IPs remain were not discovered, so add them.
        for ip in node_ips:
            self._add_mesh_element(ip)
            added.append(ip)

        if removed:
            db.add_event('network', self.uuid, 'remove mesh elements',
                         None, None, ' '.join(removed))
        if added:
            db.add_event('network', self.uuid, 'add mesh elements',
                         None, None, ' '.join(added))
def wrapper(*args, **kwargs):
    """Decorator body: merge the JSON POST body into kwargs, log the
    request, then invoke the wrapped view, translating exceptions into
    HTTP error responses.

    Fixes relative to the previous version:
    - removed the formatted_headers list, which was built but never used;
    - escaped the dots in the health-check URL regex so that only the
      literal 0.0.0.0 root URL is demoted to debug-level logging.
    """
    try:
        j = flask_get_post_body()
        if j:
            for key in j:
                # 'uuid' in the body would collide with route parameters,
                # so it is renamed before merging.
                if key == 'uuid':
                    destkey = 'passed_uuid'
                else:
                    destkey = key
                kwargs[destkey] = j[key]

        msg = 'API request: %s %s' % (flask.request.method,
                                      flask.request.url)
        msg += '\n Args: %s\n KWargs: %s' % (args, kwargs)

        # Health-check probes of the root URL are noisy, so log them at
        # debug; everything else is logged at info.
        if re.match(r'http(|s)://0\.0\.0\.0:\d+/$', flask.request.url):
            logutil.debug(None, msg)
        else:
            logutil.info(None, msg)

        return func(*args, **kwargs)

    except TypeError as e:
        return error(400, str(e))

    except DecodeError:
        # Send a more informative message than 'Not enough segments'
        return error(401, 'invalid JWT in Authorization header')

    except (JWTDecodeError, NoAuthorizationError, InvalidHeaderError,
            WrongTokenError, RevokedTokenError, FreshTokenRequired,
            CSRFError, PyJWTError) as e:
        return error(401, str(e))

    except Exception:
        # Last-resort boundary handler: hide internals from the client.
        return error(500, 'server error')
def create(self, lock=None):
    """Create this instance on the hypervisor node.

    Moves the instance from state 'creating' to 'created': materializes
    the instance directory, builds a config drive, prepares the block
    devices (fetching/converting base images as needed), writes the
    libvirt domain XML, and powers the domain on with retries.

    lock: an optional lock object passed through to image fetch/resize
        operations; may be None.
    """
    db.update_instance_state(self.db_entry['uuid'], 'creating')

    # Ensure we have state on disk
    if not os.path.exists(self.instance_path):
        logutil.debug(
            [self], 'Creating instance storage at %s' % self.instance_path)
        os.makedirs(self.instance_path)

    # Generate a config drive
    # NOTE(review): assumes the block device at index 1 is always the
    # config drive -- confirm against the block device layout code.
    with util.RecordedOperation('make config drive', self):
        self._make_config_drive(os.path.join(
            self.instance_path,
            self.db_entry['block_devices']['devices'][1]['path']))

    # Prepare disks. This is only done once; afterwards the modified
    # device list is persisted and 'finalized' is set.
    if not self.db_entry['block_devices']['finalized']:
        modified_disks = []
        for disk in self.db_entry['block_devices']['devices']:
            if disk.get('base'):
                img = images.Image(disk['base'])
                hashed_image_path = img.get([lock], self)

                # If pycdlib can open the fetched image it is an ISO, and
                # is therefore presented as a cdrom.
                with util.RecordedOperation('detect cdrom images', self):
                    try:
                        cd = pycdlib.PyCdlib()
                        cd.open(hashed_image_path)
                        disk['present_as'] = 'cdrom'
                    except Exception:
                        # Not an ISO; leave present_as untouched.
                        pass

                # NOTE(review): the default of 'cdrom' means a disk with no
                # 'present_as' key at all is treated as a cdrom --
                # presumably the block device spec always sets the key;
                # confirm, otherwise plain base images would never take the
                # resize/COW path below.
                if disk.get('present_as', 'cdrom') == 'cdrom':
                    # There is no point in resizing or COW'ing a cdrom
                    disk['path'] = disk['path'].replace('.qcow2', '.raw')
                    disk['type'] = 'raw'
                    disk['snapshot_ignores'] = True

                    # Hardlink into place when possible, fall back to a
                    # copy if the cache is on a different filesystem.
                    try:
                        os.link(hashed_image_path, disk['path'])
                    except OSError:
                        # Different filesystems
                        util.execute(
                            [lock], 'cp %s %s' % (hashed_image_path,
                                                  disk['path']))

                    # Due to limitations in some installers, cdroms are always on IDE
                    disk['device'] = 'hd%s' % disk['device'][-1]
                    disk['bus'] = 'ide'
                else:
                    with util.RecordedOperation('resize image', self):
                        resized_image_path = images.resize(
                            [lock], hashed_image_path, disk['size'])

                    if config.parsed.get('DISK_FORMAT') == 'qcow':
                        with util.RecordedOperation(
                                'create copy on write layer', self):
                            images.create_cow(
                                [lock], resized_image_path, disk['path'])

                        # Record the backing store for modern libvirts
                        disk['backing'] = (
                            '<backingStore type=\'file\'>\n'
                            ' <format type=\'qcow2\'/>\n'
                            ' <source file=\'%s\'/>\n'
                            ' </backingStore>' % resized_image_path)

                    elif config.parsed.get('DISK_FORMAT') == 'qcow_flat':
                        with util.RecordedOperation('create flat layer',
                                                    self):
                            images.create_flat(
                                [lock], resized_image_path, disk['path'])

                    elif config.parsed.get('DISK_FORMAT') == 'flat':
                        with util.RecordedOperation('create raw disk',
                                                    self):
                            images.create_raw(
                                [lock], resized_image_path, disk['path'])

                    else:
                        raise Exception('Unknown disk format')

            elif not os.path.exists(disk['path']):
                # No base image: create an empty qcow2 of the requested
                # size instead.
                util.execute(None,
                             'qemu-img create -f qcow2 %s %sG'
                             % (disk['path'], disk['size']))

            modified_disks.append(disk)

        self.db_entry['block_devices']['devices'] = modified_disks
        self.db_entry['block_devices']['finalized'] = True
        db.persist_block_devices(
            self.db_entry['uuid'], self.db_entry['block_devices'])

    # Create the actual instance
    with util.RecordedOperation('create domain XML', self):
        self._create_domain_xml()

    # Sometimes on Ubuntu 20.04 we need to wait for port binding to work.
    # Revisiting this is tracked by issue 320 on github.
    # NOTE(review): worst case this retry loop blocks for ~500 seconds
    # (100 attempts x 5 second sleep) -- confirm that is acceptable here.
    with util.RecordedOperation('create domain', self):
        if not self.power_on():
            attempts = 0
            while not self.power_on() and attempts < 100:
                logutil.warning(
                    [self],
                    'Instance required an additional attempt to power on')
                time.sleep(5)
                attempts += 1

    if self.is_powered_on():
        logutil.info([self], 'Instance now powered on')
    else:
        logutil.info([self], 'Instance failed to power on')
    db.update_instance_state(self.db_entry['uuid'], 'created')