def _download(self, endpoint, url, package_id, package_path):
    """Stream an upgrade package from the given endpoint.

    Builds a GET URL carrying the package id and path, performs the
    request and hands the raw chunk iterator back to the caller.
    Failures are logged and re-raised.
    """
    try:
        client = HTTPClient(endpoint=endpoint)
        logging.info("download the upgrade package %s" % package_id)
        # NOTE: the request must be made in streaming mode, otherwise the
        # download fails.
        full_url = '%s?package_id=%s&package_path=%s' % (url, package_id, package_path)
        _resp, package_chunks = client.get(full_url)
    except Exception as e:
        logging.error("download the upgrade package error:%s", e)
        raise
    return package_chunks
def post(self, url, data):
    """POST ``data`` to the local server and return the response body.

    The port comes from the ``server_bind`` config entry when set,
    otherwise the default server port is used.  On any failure the
    traceback is logged and an "OtherError" result dict is returned
    instead of raising.
    """
    bind = SERVER_CONF.addresses.get_by_default('server_bind', '')
    port = bind.split(':')[-1] if bind else constants.SERVER_DEFAULT_PORT
    endpoint = 'http://%s:%s' % ("127.0.0.1", port)
    client = HTTPClient(endpoint, timeout=90)
    headers = {"Content-Type": "application/json"}
    try:
        _resp, body = client.post(url, data=data, headers=headers)
    except Exception:
        logging.error(''.join(traceback.format_exc()))
        return errcode.get_error_result(error="OtherError")
    return body
def __init__(self, **kwargs):
    """Optionally bind an HTTP client.

    When an ``endpoint`` kwarg is supplied an :class:`HTTPClient` with a
    600s timeout is created; otherwise ``self.http_client`` is left unset.
    """
    endpoint = kwargs.get('endpoint')
    self.endpoint = endpoint
    if endpoint:
        self.http_client = HTTPClient(endpoint, timeout=600)
class NodeService(object):
    """Node-side helper service.

    Provides HA file/template synchronisation from the master node,
    generic streamed file download, NTP/clock configuration and small
    file utilities.
    """

    def __init__(self, **kwargs):
        # http_client is created only when an 'endpoint' kwarg is given;
        # the download-based methods require it.
        self.endpoint = kwargs.get('endpoint', None)
        if self.endpoint:
            self.http_client = HTTPClient(self.endpoint, timeout=600)

    def ha_sync_voi(self, url, paths, voi_template_list=None, voi_ha_domain_info=None):
        """Sync VOI template files to this (backup) node and rebuild disk chains.

        :param url: download endpoint route on the master node
        :param paths: plain file paths to sync; extended in place with the
            template disk/torrent paths below
        :param voi_template_list: e.g. [
            {
                "disk_path": "/opt/slow/instances/voi-2f23d11d-4462-4a85-a5fd-80ce1b308b12",
                "image_path_list": [
                    "/opt/slow/instances/_base/voi_0_2f23d11d-4462-4a85-a5fd-80ce1b308b12"
                ],
                "torrent_path_list": [
                    "/opt/slow/instances/_base/voi_0_2f23d11d-4462-4a85-a5fd-80ce1b308b12.torrent"
                ]
            },
            {
                "disk_path": "/opt/slow/datas/voi-901b5c81-3eb6-4e53-8df7-cd01484d5c83",
                "image_path_list": [
                    "/opt/slow/datas/_base/voi_0_901b5c81-3eb6-4e53-8df7-cd01484d5c83"
                ],
                "torrent_path_list": [
                    "/opt/slow/datas/_base/voi_0_901b5c81-3eb6-4e53-8df7-cd01484d5c83.torrent"
                ]
            }
        ]
        :param voi_ha_domain_info: per-template libvirt domain info dicts;
            each must contain an "xml_file" key and is passed verbatim to
            define_ha_voi_domain
        :return: a "Success" result dict
        """
        logging.info("paths: %s, voi_template_list: %s, voi_ha_domain_info: %s",
                     paths, voi_template_list, voi_ha_domain_info)
        # Collect the VOI base disk, differential disks, actual boot disk
        # and torrent files for download.
        if voi_template_list:
            for image_path_dict in voi_template_list:
                if image_path_dict["download_base"]:
                    paths.extend(image_path_dict["image_path_list"])
                else:
                    # skip the base image (index 0) when it need not be re-downloaded
                    paths.extend(image_path_dict["image_path_list"][1:])
                if image_path_dict.get("torrent_path_list", []):
                    paths.extend(image_path_dict["torrent_path_list"])
                paths.append(image_path_dict["disk_path"])
        # Also download the VOI template XML files.
        if voi_ha_domain_info:
            paths.extend([info["xml_file"] for info in voi_ha_domain_info])
        for path in paths:
            if os.path.exists(path):
                # re-download from scratch: best-effort removal of the old copy
                try:
                    os.remove(path)
                    logging.info("remove file:%s", path)
                except:
                    pass
            else:
                dir_path = os.path.dirname(path)
                if not os.path.exists(dir_path):
                    os.makedirs(dir_path)
            self.download(url, path)
        # Rebuild the qcow2 backing chain: base disk -> differential
        # disks -> actual boot disk.
        if voi_template_list:
            for image_path_dict in voi_template_list:
                # If the template has differential disks, chain each one
                # onto its predecessor (list order is the chain order
                # after sorting).
                if len(image_path_dict["image_path_list"]) > 1:
                    image_path_dict["image_path_list"].sort()
                    pre_image = image_path_dict["image_path_list"][0]
                    for image in image_path_dict["image_path_list"][1:]:
                        # '-u' rewrites only the backing-file pointer, no data copy
                        stdout, stderr = cmdutils.execute('qemu-img', 'rebase', '-u', '-b',
                                                          pre_image, image, run_as_root=True)
                        if stderr:
                            logging.error("image[%s] rebase error: %s" % (image, stderr))
                        pre_image = image
                # Attach the actual boot disk to the end of the chain.
                stdout, stderr = cmdutils.execute(
                    'qemu-img', 'rebase', '-u', '-b', image_path_dict["image_path_list"][-1],
                    image_path_dict["disk_path"], run_as_root=True)
                if stderr:
                    logging.error("image[%s] rebase error: %s" % (image_path_dict["disk_path"], stderr))
        # With HA enabled, define the VOI template domains on this backup
        # controller.
        if voi_ha_domain_info:
            for info in voi_ha_domain_info:
                logging.info("start define_ha_voi_domain: %s", info)
                guest, T_or_F = VoiLibvirtDriver().define_ha_voi_domain(**info)
                logging.info("define_ha_voi_domain: %s", T_or_F)
        return utils.build_result("Success")

    def ha_sync_file(self, url, paths, check_path=None):
        """Sync a list of files from the master, skipping unchanged ones.

        :param url: download endpoint route on the master node
        :param paths: list of dicts with "path" and optionally "md5"
        :param check_path: when given, files directly under this directory
            that are not in ``paths`` are deleted after the sync
        """
        for _d in paths:
            dir_path = os.path.dirname(_d["path"])
            if not os.path.exists(dir_path):
                os.makedirs(dir_path)
            if os.path.exists(_d["path"]):
                logging.info("file already exist on backup node, check md5: %s", _d["path"])
                md5_sum = hashlib.md5()
                with open(_d["path"], 'rb') as f:
                    while True:
                        chunk = f.read(constants.CHUNKSIZE)
                        if not chunk:
                            break
                        md5_sum.update(chunk)
                # If an md5 is provided, only re-download files whose md5
                # differs; if none is provided, delete the existing file
                # and re-download unconditionally.
                if _d.get("md5", ""):
                    if md5_sum.hexdigest() == _d["md5"]:
                        logging.info("file ok: %s", _d["path"])
                        continue
                    else:
                        logging.info("file need resync with old md5: %s", _d["path"])
                try:
                    os.remove(_d["path"])
                    logging.info("remove file: %s", _d["path"])
                except:
                    pass
            self.download(url, _d["path"])
        # Delete extra files under check_path that are not part of the
        # sync list (subdirectories are left untouched).
        if check_path:
            path_list = [_d["path"] for _d in paths]
            for file in os.listdir(check_path):
                file_path = os.path.join(check_path, file)
                if os.path.isfile(file_path):
                    if file_path not in path_list:
                        try:
                            os.remove(file_path)
                            logging.info("check dir path, remove file:%s", file_path)
                        except:
                            pass

    def download(self, url, path, md5=None):
        """Download one file from the master node to ``path``.

        :param url: base download route; ``?path=...`` is appended
        :param path: destination path, also the lookup key on the master
        :param md5: optional expected md5 hexdigest; a mismatch removes
            the partial file and raises
        :raises Exception: on HTTP failure or md5 mismatch
        """
        try:
            logging.info("download the file:%s", path)
            # the stream args must be true, otherwise the download will be failed
            url = '%s?path=%s' % (url, path)
            resp, image_chunks = self.http_client.get(url)
            # a JSON response here is the master's "file not found" reply
            if resp.headers.get('Content-Type') == 'application/json':
                logging.error("file not exist on master: %s", path)
                return
        except Exception as e:
            logging.error("sync error:%s", e)
            raise
        logging.info("data is none, open the dst_path:%s", path)
        data = open(path, 'wb')
        close_file = True
        # NOTE(review): open() never returns None, so this branch is dead
        # code kept for parity with ImageService.download.
        if data is None:
            return image_chunks
        else:
            md5_sum = hashlib.md5()
            try:
                for chunk in image_chunks:
                    md5_sum.update(chunk)
                    data.write(chunk)
                if md5:
                    logging.info("check md5, source:%s, file:%s", md5, md5_sum.hexdigest())
                    if md5_sum.hexdigest() != md5:
                        logging.error("the source md5_sum:%s, the dest md5_sum:%s",
                                      md5, md5_sum.hexdigest())
                        raise Exception("the file md5 sum check failed")
            except Exception as ex:
                logging.error("Error writing to %(path)s: %(exception)s",
                              {'path': path, 'exception': ex})
                # best-effort cleanup of the partial file before re-raising
                try:
                    os.remove(path)
                except:
                    pass
                raise ex
            finally:
                if close_file:
                    # Ensure that the data is pushed all the way down to
                    # persistent storage. This ensures that in the event of a
                    # subsequent host crash we don't have running instances
                    # using a corrupt backing file.
                    data.flush()
                    ImageService()._safe_fsync(data)
                    data.close()

    def get_data_sync_status(self, paths):
        """Return True only when every path in ``paths`` exists locally."""
        for path in paths:
            if not os.path.exists(path):
                return False
        return True

    def set_ntp(self, server):
        """Point chronyd at ``server`` and enable NTP via timedatectl."""
        chronyd_conf = [
            "driftfile /var/lib/chrony/drift",
            "makestep 1.0 3",
            "rtcsync",
            "logdir /var/log/chrony",
            "server %s iburst" % server
        ]
        FileOp(constants.CHRONYD_CONF, 'w').write_with_endline('\n'.join(chronyd_conf))
        logging.info("config ntp end:%s", chronyd_conf)
        cmdutils.run_cmd("timedatectl set-ntp yes")

    def config_ntp(self, ipaddr, netmask):
        """Configure this node as an NTP server for its local subnet.

        :param ipaddr: node IP used to derive the allowed client CIDR
        :param netmask: dotted netmask; an invalid mask falls back to /24
        """
        is_mask, bits = utils.is_netmask(netmask)
        if not is_mask:
            bits = 24
        net = netaddr.IPNetwork(str(ipaddr) + '/' + str(bits))
        cidr = str(net.network) + '/' + str(net.prefixlen)
        chronyd_conf = [
            "server ntp1.aliyun.com",
            "server ntp2.aliyun.com",
            "server cn.ntp.org.cn",
            "server cn.pool.ntp.org",
            "driftfile /var/lib/chrony/drift",
            "makestep 1.0 3",
            "rtcsync",
            "allow %s" % cidr,
            "local stratum 10",
            "logdir /var/log/chrony"
        ]
        FileOp(constants.CHRONYD_CONF, 'w').write_with_endline('\n'.join(chronyd_conf))
        logging.info("config ntp server:%s", chronyd_conf)
        cmdutils.run_cmd("firewall-cmd --add-service=ntp --permanent")
        cmdutils.run_cmd("firewall-cmd --reload")
        cmdutils.run_cmd("timedatectl set-ntp yes")

    def chunks(self, file_obj, offset=None, chunk_size=64 * 2 ** 10):
        """Yield ``chunk_size`` byte blocks from an open file object.

        :param file_obj: an open file-like object
        :param offset: optional start offset; NOTE(review): seek(None)
            raises TypeError which the bare except silently ignores, so
            the default reads from the current position
        :param chunk_size: block size in bytes (default 64 KiB)
        :return: generator of byte blocks
        """
        chunk_size = chunk_size  # no-op kept as-is
        try:
            file_obj.seek(offset)
        except:
            pass
        while True:
            data = file_obj.read(chunk_size)
            if not data:
                break
            yield data

    def set_system_time(self, _datetime, time_zone):
        """Set timezone and wall-clock time, then write to the hardware clock."""
        try:
            os.system('timedatectl set-timezone "{}"'.format(time_zone))
            os.system('date -s "{}"'.format(_datetime))
            os.system("hwclock -w")
        except Exception as e:
            logging.error("set system time failed:%s", e)
            return utils.build_result("OtherError")
        return utils.build_result("Success")

    def set_ntp_time(self, ntp_server):
        """Sync the clock from ``ntp_server`` with up to 5 ntpdate attempts.

        If the public probe address is unreachable the local time is
        re-applied first as a fallback.  Returns a Success/OtherError
        result dict.
        """
        # NOTE(review): nesting below is reconstructed from flattened
        # source; confirm the retry loop is intended to run even when the
        # connectivity probe fails.
        try:
            if not utils.icmp_ping("114.114.114.114", count=3):
                logging.info("Abnormal network link, set to local time")
                local_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
                os.system("date -s '{}'".format(local_time))
            for i in range(5):
                ret = os.system('ntpdate "{}"'.format(ntp_server))
                if ret == 0:
                    break
            else:
                # all five attempts failed
                logging.error("set ntp time error: Abnormal network environment")
                return utils.build_result("OtherError")
            os.system("hwclock -w")
            return utils.build_result("Success")
        except Exception as e:
            logging.error("set ntp time error:%s", e)
            return utils.build_result("OtherError")
class ImageService(object):
    """Image synchronisation and qcow2 maintenance service.

    Downloads template images from the master node, verifies md5 sums,
    commits diff files, and wraps qemu-img create/convert/resize/rebase
    operations.
    """

    def __init__(self, **kwargs):
        # http_client is created only when an 'endpoint' kwarg is given;
        # download/sync require it.
        self.endpoint = kwargs.get('endpoint', None)
        if self.endpoint:
            self.http_client = HTTPClient(self.endpoint, timeout=600)

    def sync_thread(self, url, images, image_version):
        """Run :meth:`sync` in a background thread (fire and forget)."""
        th1 = Thread(target=self.sync, args=(url, images, image_version, ))
        th1.start()
        logging.info("finish sync thread")

    def sync(self, url, image, version, task_id=None):
        """Sync one image from the master and commit its diff if required.

        :param url: download endpoint route on the master node
        :param image: dict with 'dest_path', 'backing_file', 'image_id'
            and optionally 'md5_sum'
        :param version: image version; versions past
            IMAGE_COMMIT_VERSION get their diff committed into the base
        :param task_id: optional task id forwarded to the master
        :raises exception.ImageCommitError: when qemu-img commit fails
        """
        dest_path = image['dest_path']
        backing_file = image['backing_file']
        if version > 1 and not os.path.exists(backing_file):
            # When a data disk is added to an existing template, its base
            # file must be synced over first.
            logging.info("syncing the backing file:%s", backing_file)
            backing_image = {
                "image_id": image['image_id'],
                "disk_file": backing_file
            }
            self.download(url, backing_image, backing_file)
        if os.path.exists(dest_path):
            logging.info("the dest_path %s already exists", dest_path)
            if image.get('md5_sum', None):
                logging.info("need check md5, get %s md5 sum", dest_path)
                md5_sum = get_file_md5(dest_path)
                if md5_sum == image['md5_sum']:
                    logging.info("check md5 sum success, return")
                    return
                else:
                    # checksum mismatch: re-download over the stale copy
                    self.download(url, image, dest_path, task_id)
            # NOTE(review): with no 'md5_sum' an existing file is trusted
            # as-is and not re-downloaded.
        else:
            self.download(url, image, dest_path, task_id)
        if version >= constants.IMAGE_COMMIT_VERSION:
            # fold the diff file back into its backing file, then drop it
            logging.info("commit the diff file:%s", dest_path)
            stdout, stderr = cmdutils.execute('qemu-img', 'commit', '-f', 'qcow2',
                                              dest_path, run_as_root=True)
            if stderr:
                raise exception.ImageCommitError(image=dest_path, error=stderr)
            try:
                logging.debug("delete the diff file after commit")
                os.remove(dest_path)
            except:
                pass

    def download(self, url, image, dest_path, task_id=None):
        """Stream one image from the master to ``dest_path`` with md5 check.

        :param url: base download route; image_id/image_path (and
            optionally task_id) query args are appended
        :param image: dict with 'image_id', 'disk_file' and optionally
            'md5_sum'
        :param dest_path: local destination file
        :param task_id: optional task id forwarded to the master
        :raises Exception: on HTTP failure or md5 mismatch (partial file
            is removed first)
        """
        try:
            logging.info("sync the image, info:%s", image)
            # the stream args must be true, otherwise the download will be failed
            url = '%s?image_id=%s&image_path=%s&s' % (url, image['image_id'], image['disk_file'])
            if task_id:
                url = "%s&task_id=%s" % (url, task_id)
            resp, image_chunks = self.http_client.get(url)
        except Exception as e:
            logging.error("sync error:%s", e)
            raise
        logging.info("data is none, open the dst_path:%s", dest_path)
        utils.ensure_tree(os.path.dirname(dest_path))
        data = open(dest_path, 'wb')
        close_file = True
        # NOTE(review): open() never returns None; this branch is dead code.
        if data is None:
            return image_chunks
        else:
            md5_sum = hashlib.md5()
            try:
                for chunk in image_chunks:
                    md5_sum.update(chunk)
                    data.write(chunk)
                if image.get('md5_sum', None):
                    logging.info("check md5, image:%s, file:%s", image['md5_sum'], md5_sum.hexdigest())
                    if md5_sum.hexdigest() != image['md5_sum']:
                        logging.error(
                            "the image md5_sum:%s, the receive md5_sum:%s",
                            image['md5_sum'], md5_sum.hexdigest())
                        raise Exception("the image md5 sum check failed")
            except Exception as ex:
                logging.error("Error writing to %(path)s: %(exception)s", {
                    'path': dest_path,
                    'exception': ex
                })
                # best-effort cleanup of the partial file before re-raising
                try:
                    os.remove(dest_path)
                except:
                    pass
                raise ex
            finally:
                if close_file:
                    # Ensure that the data is pushed all the way down to
                    # persistent storage. This ensures that in the event of a
                    # subsequent host crash we don't have running instances
                    # using a corrupt backing file.
                    data.flush()
                    self._safe_fsync(data)
                    data.close()

    # NOTE(review): a commented-out single-image variant
    # (sync_thread_single / sync_single) was removed here as dead code;
    # see version control history if it is ever needed again.

    @staticmethod
    def _safe_fsync(fh):
        """Performs os.fsync on a filehandle only if it is supported.

        fsync on a pipe, FIFO, or socket raises OSError with EINVAL. This
        method discovers whether the target filehandle is one of these types
        and only performs fsync if it isn't.

        :param fh: Open filehandle (not a path or fileno) to maybe fsync.
        """
        logging.debug("fsync the file")
        fileno = fh.fileno()
        mode = os.fstat(fileno).st_mode
        # A pipe answers True to S_ISFIFO
        if not any(check(mode) for check in (stat.S_ISFIFO, stat.S_ISSOCK)):
            os.fsync(fileno)

    def recreate_disks(self, disks):
        """Delete and recreate each qcow2 disk on top of its backing file.

        :param disks: dicts with 'disk_file' and 'backing_file'
        """
        for disk in disks:
            # best-effort removal: the disk may not exist yet
            try:
                os.remove(disk['disk_file'])
            except:
                pass
            cmdutils.execute('qemu-img', 'create', '-f', 'qcow2', disk['disk_file'],
                             '-o', 'backing_file=%s' % disk['backing_file'], run_as_root=True)

    # NOTE(review): a commented-out template save() implementation was
    # removed here as dead code; see version control history.

    def convert(self, template):
        """Produce a standalone image from a template's backing file.

        Uses ``qemu-img convert`` when 'need_convert' is set (also
        flattens the chain), otherwise a plain file copy.

        :param template: dict with 'backing_file', 'dest_file',
            'need_convert'
        :return: {'path': dest_file}
        :raises exception.ImageCopyIOError: when the plain copy fails
        """
        source_path = template['backing_file']
        dest_path = template['dest_file']
        logging.info("start convert, source:%s, dest:%s", source_path, dest_path)
        if template['need_convert']:
            logging.info("convert from %s to %s", source_path, dest_path)
            cmdutils.execute('qemu-img', 'convert', '-f', 'qcow2', '-O', 'qcow2',
                             source_path, dest_path, run_as_root=True)
        else:
            logging.info("generate base image from origin image")
            try:
                shutil.copy(source_path, dest_path)
            except IOError as e:
                logging.error("copy image failed:%s", e)
                raise exception.ImageCopyIOError(source_path)
        logging.info("generat new image success")
        return {'path': dest_path}

    def read_in_block(self, file_path):
        """Yield CHUNKSIZE byte blocks from the file at ``file_path``."""
        with open(file_path, "rb") as f:
            while True:
                block = f.read(constants.CHUNKSIZE)
                if block:
                    yield block
                else:
                    return

    def write_header(self, data):
        """Prepend a fixed 200-byte metadata header to an image file.

        Writes a new file named ``<image>_c<vcpu>_r<ram>_d<disk>`` whose
        first 200 bytes are a space-padded "yzy|..." header (including the
        original file's md5), followed by the original image content.  The
        original image is removed afterwards in all cases.

        :param data: dict with 'vcpu', 'ram', 'disk_size', 'image_path'
        :return: {'path': new_file_path}
        :raises Exception: re-raises any failure after cleaning up the
            partial destination file
        """
        try:
            vcpu = data['vcpu']
            ram = data['ram']
            disk_size = data['disk_size']
            image_path = data['image_path']
            md5_sum = get_file_md5(image_path)
            head = "yzy|os_type:%s|os_bit:%s|version:%s|vcpu:%s|ram:%s|disk:%s|md5:%s" % (
                'win7', 32, 1, vcpu, ram, disk_size, md5_sum)
            head = head.encode("utf-8")
            # pad the header to a fixed 200 bytes so the payload offset is
            # constant for readers
            max_length = 200
            space_num = max_length - len(head)
            _head = head + b' ' * space_num
            file_name = "%s_c%s_r%s_d%s" % (image_path.split('/')[-1], vcpu, ram, disk_size)
            image_path_info = image_path.split('/')
            image_path_info[-1] = file_name
            file_path = '/'.join(image_path_info)
            with open(file_path, "wb+") as f:
                f.write(_head)
                for block in self.read_in_block(image_path):
                    f.write(block)
            logging.info("write head info to image %s success, dest_path:%s", image_path, file_path)
        except Exception as e:
            logging.exception("write head info failed:%s", e)
            # remove the partially-written destination before re-raising
            try:
                logging.info("delete dest image file:%s", file_path)
                os.remove(file_path)
            except:
                pass
            raise e
        finally:
            # the origin image is always dropped, success or failure
            try:
                logging.info("delete origin image file:%s", image_path)
                os.remove(image_path)
            except:
                pass
        return {'path': file_path}

    def copy_images(self, image):
        """Copy 'backing_file' to 'dest_file'; raises ImageCopyIOError on failure."""
        backing_file = image['backing_file']
        dest_file = image['dest_file']
        try:
            logging.info("copy file from %s to %s", backing_file, dest_file)
            shutil.copy(backing_file, dest_file)
        except IOError as e:
            logging.error("copy image failed:%s", e)
            raise exception.ImageCopyIOError(backing_file)
        logging.info("copy new image success")
        return True

    def delete_image(self, image):
        """Delete 'disk_file' if present; raises ImageDeleteIOError on IO failure."""
        try:
            logging.info("delete file %s", image['disk_file'])
            if os.path.exists(image['disk_file']):
                os.remove(image['disk_file'])
        except IOError as e:
            logging.error("copy image failed:%s", e)
            raise exception.ImageDeleteIOError(image['disk_file'])
        logging.info("delete image success")
        return True

    def resize_disk(self, images):
        """Grow each image by 'size' GiB via ``qemu-img resize``.

        :param images: dicts with 'disk_file' and 'size' (GiB to add)
        :raises exception.ImageResizeError: on any resize failure
        """
        for image in images:
            try:
                logging.info("resize file %s", image['disk_file'])
                size = '+%sG' % image['size']
                cmdutils.execute('qemu-img', 'resize', image['disk_file'], size,
                                 run_as_root=True)
            except Exception as e:
                logging.error("resize image file failed:%s", e)
                raise exception.ImageResizeError(image=image['disk_file'], error=e)
        logging.info("resize image success")
        return True

    def create_qcow2_file(self, disk_file, size):
        """Create an empty qcow2 file of the given size, making parent dirs."""
        dir_path = os.path.dirname(disk_file)
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)
        cmdutils.execute('qemu-img', 'create', '-f', 'qcow2', disk_file, size,
                         run_as_root=True)
        logging.info("create qcow2 file success")
        return True