Example 1
    def remove_snap(self, volume, name, ignore_errors=False, pool=None,
                    force=False):
        """Removes a snapshot from an RBD volume.

        :volume: Name of RBD object
        :name: Name of snapshot
        :ignore_errors: whether or not to log warnings on failures
        :pool: Name of pool
        :force: Remove snapshot even if it is protected
        """
        with RBDVolumeProxy(self, str(volume), pool=pool) as vol:
            if name in [snap.get('name', '') for snap in vol.list_snaps()]:
                if vol.is_protected_snap(name):
                    if force:
                        tpool.execute(vol.unprotect_snap, name)
                    elif not ignore_errors:
                        LOG.warning(_LW('snapshot(%(name)s) on rbd '
                                        'image(%(img)s) is protected, '
                                        'skipping'),
                                    {'name': name, 'img': volume})
                        return
                LOG.debug('removing snapshot(%(name)s) on rbd image(%(img)s)',
                          {'name': name, 'img': volume})
                tpool.execute(vol.remove_snap, name)
            elif not ignore_errors:
                LOG.warning(_LW('no snapshot(%(name)s) found on rbd '
                                'image(%(img)s)'),
                            {'name': name, 'img': volume})
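All of the examples in this collection revolve around the same primitive: eventlet.tpool.execute(func, *args, **kwargs) runs a blocking callable in a native OS thread and returns its result, so only the calling greenthread waits while other greenthreads keep running. A minimal, self-contained sketch of that pattern (blocking_io is illustrative, not from the snippet above):

import eventlet
from eventlet import tpool


def blocking_io():
    # The original, unpatched sleep blocks a real OS thread; this is
    # exactly the kind of call tpool.execute is meant to absorb.
    eventlet.patcher.original('time').sleep(0.2)
    return 'done'


def main():
    # Only this greenthread waits; others keep running meanwhile.
    print(tpool.execute(blocking_io))  # -> done


eventlet.spawn(main).wait()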
Example 2
    def copy_put(self, fd, tmppath):
        # Flush the temp file to disk, create any missing parent directory
        # objects, then move it into its final location.
        tpool.execute(os.fsync, fd)

        if self.obj_path:
            dir_objs = self.obj_path.split('/')
            tmp_path = ''
            if len(dir_objs):
                for dir_name in dir_objs:
                    if tmp_path:
                        tmp_path = tmp_path + '/' + dir_name
                    else:
                        tmp_path = dir_name
                    if not self.create_dir_object(
                            os.path.join(self.container_path, tmp_path)):
                        self.logger.error("Failed in subdir %s",
                                          os.path.join(self.container_path,
                                                       tmp_path))
                        return False

        renamer(tmppath, os.path.join(self.datadir,
                                      self.obj))
        
        do_chown(os.path.join(self.datadir, self.obj), self.uid, self.gid)
        
        return True
Example 3
    def update_deleted(self, job):
        """
        High-level method that replicates a single partition that doesn't
        belong on this node.

        :param job: a dict containing info about the partition to be replicated
        """

        def tpool_get_suffixes(path):
            return [suff for suff in os.listdir(path)
                    if len(suff) == 3 and isdir(join(path, suff))]
        self.replication_count += 1
        begin = time.time()
        try:
            responses = []
            suffixes = tpool.execute(tpool_get_suffixes, job['path'])
            if suffixes:
                for node in job['nodes']:
                    success = self.rsync(node, job, suffixes)
                    if success:
                        with Timeout(self.http_timeout):
                            http_connect(
                                node['ip'], node['port'], node['device'],
                                job['partition'], 'REPLICATE',
                                '/' + '-'.join(suffixes),
                                headers={'Content-Length': '0'},
                            ).getresponse().read()
                    responses.append(success)
            if not suffixes or (len(responses) == \
                        self.object_ring.replica_count and all(responses)):
                self.logger.info(_("Removing partition: %s"), job['path'])
                tpool.execute(shutil.rmtree, job['path'], ignore_errors=True)
        except (Exception, Timeout):
            self.logger.exception(_("Error syncing handoff partition"))
        finally:
            self.partition_times.append(time.time() - begin)
Example 4
    def put(self, metadata, extension='.data'):
        """
        Finalize writing the file on disk, and renames it from the temp file
        to the real location.  This should be called after the data has been
        written to the temp file.

        :param metadata: dictionary of metadata to be written
        :param extension: extension to be used when making the file
        """
        assert self.tmppath is not None
        timestamp = normalize_timestamp(metadata['X-Timestamp'])
        metadata['name'] = self.disk_file.name
        # Write the metadata before calling fsync() so that both data and
        # metadata are flushed to disk.
        write_metadata(self.fd, metadata)
        # We call fsync() before calling drop_cache() to lower the amount of
        # redundant work the drop cache code will perform on the pages (now
        # that after fsync the pages will be all clean).
        tpool.execute(fsync, self.fd)
        # From the Department of the Redundancy Department, make sure we
        # call drop_cache() after fsync() to avoid redundant work (pages
        # all clean).
        drop_buffer_cache(self.fd, 0, self.upload_size)
        invalidate_hash(os.path.dirname(self.disk_file.datadir))
        # After the rename completes, this object will be available for other
        # requests to reference.
        renamer(self.tmppath,
                os.path.join(self.disk_file.datadir, timestamp + extension))
        self.disk_file.metadata = metadata
Example 5
def do_fsync(fd):
    try:
        tpool.execute(os.fsync, fd)
    except OSError as err:
        logging.exception("fsync failed with err: %s", err.strerror)
        raise
    return True
Example 6
def verify_glance_image_signature(context, image_service, image_id, path):
    verifier = None
    image_meta = image_service.show(context, image_id)
    image_properties = image_meta.get('properties', {})
    img_signature = image_properties.get('img_signature')
    img_sig_hash_method = image_properties.get('img_signature_hash_method')
    img_sig_cert_uuid = image_properties.get('img_signature_certificate_uuid')
    img_sig_key_type = image_properties.get('img_signature_key_type')
    if all(m is None for m in [img_signature,
                               img_sig_cert_uuid,
                               img_sig_hash_method,
                               img_sig_key_type]):
        # NOTE(tommylikehu): We won't verify the image signature
        # if none of the signature metadata presents.
        return False
    if any(m is None for m in [img_signature,
                               img_sig_cert_uuid,
                               img_sig_hash_method,
                               img_sig_key_type]):
        LOG.error('Image signature metadata for image %s is '
                  'incomplete.', image_id)
        raise exception.InvalidSignatureImage(image_id=image_id)

    try:
        verifier = signature_utils.get_verifier(
            context=context,
            img_signature_certificate_uuid=img_sig_cert_uuid,
            img_signature_hash_method=img_sig_hash_method,
            img_signature=img_signature,
            img_signature_key_type=img_sig_key_type,
        )
    except cursive_exception.SignatureVerificationError:
        message = _('Failed to get verifier for image: %s') % image_id
        LOG.error(message)
        raise exception.ImageSignatureVerificationException(
            reason=message)
    if verifier:
        with fileutils.remove_path_on_error(path):
            with open(path, "rb") as tem_file:
                try:
                    tpool.execute(_verify_image, tem_file, verifier)
                    LOG.info('Image signature verification succeeded '
                             'for image: %s', image_id)
                    return True
                except cryptography.exceptions.InvalidSignature:
                    message = _('Image signature verification '
                                'failed for image: %s') % image_id
                    LOG.error(message)
                    raise exception.ImageSignatureVerificationException(
                        reason=message)
                except Exception as ex:
                    message = _('Failed to verify signature for '
                                'image: %(image)s due to '
                                'error: %(error)s ') % {'image': image_id,
                                                        'error':
                                                            six.text_type(ex)}
                    LOG.error(message)
                    raise exception.ImageSignatureVerificationException(
                        reason=message)
    return False
Example 7
def _transfer_data(src, dest, length, chunk_size):
    """Transfer data between files (Python IO objects)."""

    chunks = int(math.ceil(length / chunk_size))
    remaining_length = length

    LOG.debug("%(chunks)s chunks of %(bytes)s bytes to be transferred.",
              {'chunks': chunks, 'bytes': chunk_size})

    for chunk in range(0, chunks):
        before = time.time()
        data = tpool.execute(src.read, min(chunk_size, remaining_length))

        # If we have reached end of source, discard any extraneous bytes from
        # destination volume if trim is enabled and stop writing.
        if data == b'':
            break

        tpool.execute(dest.write, data)
        remaining_length -= len(data)
        delta = (time.time() - before)
        rate = (chunk_size / delta) / units.Ki
        LOG.debug("Transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s).",
                  {'chunk': chunk + 1, 'chunks': chunks, 'rate': rate})

        # yield to any other pending operations
        eventlet.sleep(0)

    tpool.execute(dest.flush)
Example 8
 def test_timeout(self):
     blocking = eventlet.patcher.original('time')
     eventlet.Timeout(0.1, eventlet.Timeout())
     try:
         tpool.execute(blocking.sleep, 0.3)
         assert False, 'Expected Timeout'
     except eventlet.Timeout:
         pass
Example 9
 def delete_partition(self, path):
     self.logger.info(_("Removing partition: %s"), path)
     try:
         tpool.execute(shutil.rmtree, path)
     except OSError as e:
         if e.errno not in (errno.ENOENT, errno.ENOTEMPTY):
             # If there was a race to create or delete, don't worry
             raise
Example 10
    def put(self, fd, tmppath, metadata, extension=''):
        """
        Finalize writing the file on disk, and renames it from the temp file to
        the real location.  This should be called after the data has been
        written to the temp file.

        :param fd: file descriptor of the temp file
        :param tmppath: path to the temporary file being used
        :param metadata: dictionary of metadata to be written
        :param extension: extension to be used when making the file
        """
        #Marker dir.
        if extension == '.ts':
            return True
        if extension == '.meta':
            self.put_metadata(metadata)
            return True
        else:
            extension = ''
        if metadata[X_OBJECT_TYPE] == MARKER_DIR:
            self.create_dir_object(os.path.join(self.datadir, self.obj))
            self.put_metadata(metadata)
            self.data_file = self.datadir + '/' + self.obj
            return True
        #Check if directory already exists.
        if self.is_dir:
            self.logger.error('Directory already exists %s/%s' %
                              (self.datadir, self.obj))
            return False
        #metadata['name'] = self.name
        timestamp = normalize_timestamp(metadata[X_TIMESTAMP])
        write_metadata(tmppath, metadata)
        if X_CONTENT_LENGTH in metadata:
            self.drop_cache(fd, 0, int(metadata[X_CONTENT_LENGTH]))
        tpool.execute(os.fsync, fd)
        if self.obj_path:
            dir_objs = self.obj_path.split('/')
            tmp_path = ''
            if len(dir_objs):
                for dir_name in dir_objs:
                    if tmp_path:
                        tmp_path = tmp_path + '/' + dir_name
                    else:
                        tmp_path = dir_name
                    if not self.create_dir_object(
                            os.path.join(self.container_path, tmp_path)):
                        self.logger.error("Failed in subdir %s",
                                          os.path.join(self.container_path,
                                                       tmp_path))
                        return False

        renamer(tmppath, os.path.join(self.datadir,
                                      self.obj + extension))
        do_chown(os.path.join(self.datadir, self.obj + extension), \
              self.uid, self.gid)
        self.metadata = metadata
        #self.logger.error("Meta %s", self.metadata)
        self.data_file = self.datadir + '/' + self.obj + extension
        return True
Example 11
    def start(self):
        super(EventStreamService, self).start()

        LOG.debug("Starting evenstream thread")

        self.load_extenstions()

        self.gerrit = GerritEvents(self._map_gerrit)
        tpool.execute(self.gerrit.start)
Example 12
    def create_snap(self, volume, name):
        """Create a snapshot on an RBD object.

        :volume: Name of RBD object
        :name: Name of snapshot
        """
        LOG.debug('creating snapshot(%(snap)s) on rbd image(%(img)s)',
                  {'snap': name, 'img': volume})
        with RBDVolumeProxy(self, volume) as vol:
            tpool.execute(vol.create_snap, name)
Example 13
    def flatten(self, volume, pool=None):
        """"Flattens" a snapshotted image with the parents' data,
        effectively detaching it from the parent.

        :volume: Name of RBD object
        :pool: Name of pool
        """
        LOG.debug('flattening %(pool)s/%(vol)s', dict(pool=pool, vol=volume))
        with RBDVolumeProxy(self, str(volume), pool=pool) as vol:
            tpool.execute(vol.flatten)
Example 14
 def _define_filter(self, xml):
     if callable(xml):
         xml = xml()
     # execute in a native thread and block current greenthread until done
     if not CONF.libvirt_nonblocking:
         # NOTE(maoy): the original implementation is to have the API called
         # in the thread pool no matter what.
         tpool.execute(self._conn.nwfilterDefineXML, xml)
     else:
         # NOTE(maoy): self._conn is an eventlet.tpool.Proxy object
         self._conn.nwfilterDefineXML(xml)
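The two branches above illustrate eventlet's two offloading idioms: an explicit tpool.execute for a single call, versus wrapping the connection in a tpool.Proxy so every method call is dispatched to the thread pool transparently. A hedged sketch of both, where SlowConn is a hypothetical stand-in for a blocking handle such as a libvirt connection:

from eventlet import tpool


class SlowConn:
    """Hypothetical stand-in for a blocking connection object."""
    def nwfilterDefineXML(self, xml):
        return xml  # a slow C-library call in real life


# Idiom 1: offload one call explicitly.
conn = SlowConn()
tpool.execute(conn.nwfilterDefineXML, '<filter/>')

# Idiom 2: wrap once; all method calls then run in the thread pool.
proxied = tpool.Proxy(SlowConn())
proxied.nwfilterDefineXML('<filter/>')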
Example 15
    def put(self, fd, tmppath, metadata, extension=''):
        """
        Finalize writing the file on disk, and renames it from the temp file to
        the real location.  This should be called after the data has been
        written to the temp file.

        :param fd: file descriptor of the temp file
        :param tmppath: path to the temporary file being used
        :param metadata: dictionary of metadata to be written
        :param extension: extension to be used when making the file
        """
        #Marker dir.
        if metadata[X_OBJECT_TYPE] == MARKER_DIR:
            if os.path.exists(os.path.join(self.datadir, self.obj)) and \
               not os.path.isdir(os.path.join(self.datadir, self.obj)):
                os.unlink(os.path.join(self.datadir, self.obj))
            mkdirs(os.path.join(self.datadir, self.obj))
            os.chown(os.path.join(self.datadir, self.obj), self.uid, self.gid)
            self.put_metadata(metadata)
            self.data_file = self.datadir + '/' + self.obj
            return True
        #Check if directory already exists.
        if self.is_dir:
            logging.error('Directory already exists %s/%s' %
                          (self.datadir, self.obj))
            return False
        #metadata['name'] = self.name
        timestamp = normalize_timestamp(metadata[X_TIMESTAMP])
        write_metadata(fd, metadata)
        if X_CONTENT_LENGTH in metadata:
            self.drop_cache(fd, 0, int(metadata[X_CONTENT_LENGTH]))
        tpool.execute(os.fsync, fd)
        if self.obj_path:
            dir_objs = self.obj_path.split('/')
            tmp_path = ''
            if len(dir_objs):
                for dir_name in dir_objs:
                    if tmp_path:
                        tmp_path = tmp_path + '/' + dir_name
                    else:
                        tmp_path = dir_name
                    if not self.create_dir_object(tmp_path, metadata[X_TIMESTAMP]):
                        return False
                                       
        #invalidate_hash(os.path.dirname(self.datadir))
        renamer(tmppath, os.path.join(self.datadir,
                                      self.obj + extension))
        os.chown(os.path.join(self.datadir, self.obj + extension),
                 self.uid, self.gid)
        self.metadata = metadata
        self.data_file = self.datadir + '/' + self.obj + extension
        return True
Example 16
    def rollback_to_snap(self, volume, name):
        """Revert an RBD volume to its contents at a snapshot.

        :volume: Name of RBD object
        :name: Name of snapshot
        """
        with RBDVolumeProxy(self, volume) as vol:
            if name in [snap.get('name', '') for snap in vol.list_snaps()]:
                LOG.debug('rolling back rbd image(%(img)s) to '
                          'snapshot(%(snap)s)', {'snap': name, 'img': volume})
                tpool.execute(vol.rollback_to_snap, name)
            else:
                raise exception.SnapshotNotFound(snapshot_id=name)
Example 17
 def delete_handoff_objs(self, job, delete_objs):
     for object_hash in delete_objs:
         object_path = storage_directory(job['obj_path'], job['partition'],
                                         object_hash)
         tpool.execute(shutil.rmtree, object_path, ignore_errors=True)
         suffix_dir = dirname(object_path)
         try:
             os.rmdir(suffix_dir)
         except OSError as e:
             if e.errno not in (errno.ENOENT, errno.ENOTEMPTY):
                 self.logger.exception(
                     "Unexpected error trying to cleanup suffix dir:%r",
                     suffix_dir)
Example 18
    def put(self, fd, metadata, extension='.data'):
        """
        Finalize writing the file on disk, and renames it from the temp file to
        the real location.  This should be called after the data has been
        written to the temp file.

        :param fd: file descriptor of the temp file
        :param metadata: dictionary of metadata to be written
        :param extension: extension to be used when making the file
        """
        # Our caller will use '.data' here; we just ignore it since we map the
        # URL directly to the file system.
        extension = ''

        metadata = _adjust_metadata(metadata)

        if metadata[X_OBJECT_TYPE] == MARKER_DIR:
            if not self.data_file:
                self.data_file = os.path.join(self.datadir, self._obj)
                self._create_dir_object(self.data_file)
            self.put_metadata(metadata)
            return

        # Check if directory already exists.
        if self._is_dir:
            # FIXME: How can we have a directory and it not be marked as a
            # MARKER_DIR (see above)?
            msg = 'File object exists as a directory: %s' % self.data_file
            raise AlreadyExistsAsDir(msg)

        timestamp = normalize_timestamp(metadata[X_TIMESTAMP])
        write_metadata(self.tmppath, metadata)
        if X_CONTENT_LENGTH in metadata:
            self.drop_cache(fd, 0, int(metadata[X_CONTENT_LENGTH]))
        tpool.execute(os.fsync, fd)
        if self._obj_path:
            dir_objs = self._obj_path.split('/')
            assert len(dir_objs) >= 1
            tmp_path = self._container_path
            for dir_name in dir_objs:
                tmp_path = os.path.join(tmp_path, dir_name)
                self._create_dir_object(tmp_path)

        newpath = os.path.join(self.datadir, self._obj)
        renamer(self.tmppath, newpath)
        do_chown(newpath, self.uid, self.gid)
        self.metadata = metadata
        self.data_file = newpath
        self.filter_metadata()
        return
Example 19
    def create_snap(self, volume, name, pool=None, protect=False):
        """Create a snapshot of an RBD volume.

        :volume: Name of RBD object
        :name: Name of snapshot
        :pool: Name of pool
        :protect: Set the snapshot to "protected"
        """
        LOG.debug('creating snapshot(%(snap)s) on rbd image(%(img)s)',
                  {'snap': name, 'img': volume})
        with RBDVolumeProxy(self, str(volume), pool=pool) as vol:
            tpool.execute(vol.create_snap, name)
            if protect and not vol.is_protected_snap(name):
                tpool.execute(vol.protect_snap, name)
Example 20
 def delete_handoff_objs(self, job, delete_objs):
     success_paths = []
     error_paths = []
     for object_hash in delete_objs:
         object_path = storage_directory(job["obj_path"], job["partition"], object_hash)
         tpool.execute(shutil.rmtree, object_path, ignore_errors=True)
         suffix_dir = dirname(object_path)
         try:
             os.rmdir(suffix_dir)
             success_paths.append(object_path)
         except OSError as e:
             if e.errno not in (errno.ENOENT, errno.ENOTEMPTY):
                 error_paths.append(object_path)
                 self.logger.exception("Unexpected error trying to cleanup suffix dir:%r", suffix_dir)
     return success_paths, error_paths
Example 21
    def update_deleted(self, job):
        """
        High-level method that replicates a single partition that doesn't
        belong on this node.

        :param job: a dict containing info about the partition to be replicated
        """

        def tpool_get_suffixes(path):
            return [suff for suff in os.listdir(path) if len(suff) == 3 and isdir(join(path, suff))]

        self.replication_count += 1
        self.logger.increment("partition.delete.count.%s" % (job["device"],))
        self.headers["X-Backend-Storage-Policy-Index"] = job["policy_idx"]
        begin = time.time()
        try:
            responses = []
            suffixes = tpool.execute(tpool_get_suffixes, job["path"])
            if suffixes:
                for node in job["nodes"]:
                    success = self.sync(node, job, suffixes)
                    if success:
                        with Timeout(self.http_timeout):
                            conn = http_connect(
                                node["replication_ip"],
                                node["replication_port"],
                                node["device"],
                                job["partition"],
                                "REPLICATE",
                                "/" + "-".join(suffixes),
                                headers=self.headers,
                            )
                            conn.getresponse().read()
                    responses.append(success)
            if self.handoff_delete:
                # delete handoff if we have had handoff_delete successes
                delete_handoff = len([resp for resp in responses if resp]) >= self.handoff_delete
            else:
                # delete handoff if all syncs were successful
                delete_handoff = len(responses) == len(job["nodes"]) and all(responses)
            if not suffixes or delete_handoff:
                self.logger.info(_("Removing partition: %s"), job["path"])
                tpool.execute(shutil.rmtree, job["path"], ignore_errors=True)
        except (Exception, Timeout):
            self.logger.exception(_("Error syncing handoff partition"))
        finally:
            self.partition_times.append(time.time() - begin)
            self.logger.timing_since("partition.delete.timing", begin)
Example 22
 def call_xenapi(self, method, *args):
     """Call the specified XenAPI method on a background thread."""
     with self._get_session() as session:
         f = session.xenapi
         for m in method.split('.'):
             f = getattr(f, m)
         return tpool.execute(f, *args)
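The getattr walk above resolves a dotted method name like 'VM.get_record' into a bound callable before handing it to the thread pool. A self-contained sketch of the same dispatch, with FakeAPI standing in (hypothetically) for the session.xenapi object:

from eventlet import tpool


class _VM:
    def get_record(self, ref):
        return {'ref': ref}


class FakeAPI:
    VM = _VM()


def call_api(api, method, *args):
    f = api
    for m in method.split('.'):
        f = getattr(f, m)           # FakeAPI -> VM -> get_record
    return tpool.execute(f, *args)  # run the bound method in a worker thread


print(call_api(FakeAPI(), 'VM.get_record', 'vm-ref-1'))  # {'ref': 'vm-ref-1'}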
Example 23
def recalculate_hashes(partition_dir, suffixes, reclaim_age=ONE_WEEK):
    """
    Recalculates hashes for the given suffixes in the partition and updates
    them in the partition's hashes file.

    :param partition_dir: directory of the partition in which to recalculate
    :param suffixes: list of suffixes to recalculate
    :param reclaim_age: age in seconds at which tombstones should be removed
    """

    def tpool_listdir(partition_dir):
        return dict(((suff, None) for suff in os.listdir(partition_dir)
                     if len(suff) == 3 and isdir(join(partition_dir, suff))))
    hashes_file = join(partition_dir, HASH_FILE)
    with lock_path(partition_dir):
        try:
            with open(hashes_file, 'rb') as fp:
                hashes = pickle.load(fp)
        except Exception:
            hashes = tpool.execute(tpool_listdir, partition_dir)
        for suffix in suffixes:
            suffix_dir = join(partition_dir, suffix)
            if os.path.exists(suffix_dir):
                hashes[suffix] = hash_suffix(suffix_dir, reclaim_age)
            elif suffix in hashes:
                del hashes[suffix]
        with open(hashes_file + '.tmp', 'wb') as fp:
            pickle.dump(hashes, fp, PICKLE_PROTOCOL)
        renamer(hashes_file + '.tmp', hashes_file)
Example 24
 def discard(self, instance_name):
     """
      Discard all of the vms artifacts associated with a blessed instance
     """
     LOG.debug(_("Calling commands.discard with name=%s"), instance_name)
     result = tpool.execute(commands.discard, instance_name)
     LOG.debug(_("Called commands.discard with name=%s"), instance_name)
Example 25
    def launch(self, context, instance_name, mem_target,
               new_instance_ref, network_info, migration_url=None):
        """
        Launch a blessed instance
        """
        newname = self.pre_launch(context, new_instance_ref, network_info,
                                  migration=(migration_url and True))

        # Launch the new VM.
        LOG.debug(_("Calling vms.launch with name=%s, new_name=%s, target=%s, migration_url=%s"),
                  instance_name, newname, mem_target, str(migration_url))

        result = tpool.execute(commands.launch,
                               instance_name,
                               newname,
                               str(mem_target),
                               network=migration_url,
                               migration=(migration_url and True))

        LOG.debug(_("Called vms.launch with name=%s, new_name=%s, target=%s, migration_url=%s"),
                  instance_name, newname, mem_target, str(migration_url))

        # Take care of post-launch.
        self.post_launch(context,
                         new_instance_ref,
                         network_info,
                         migration=(migration_url and True))
        return result
Example 26
 def call_xenapi_request(self, method, *args):
     """Some interactions with dom0, such as interacting with xenstore's
     param record, require using the xenapi_request method of the session
     object. This wraps that call on a background thread.
     """
     f = self._session.xenapi_request
     return tpool.execute(f, method, *args)
Example 27
    def write(self, chunk):
        """
        Write a chunk of data into the temporary file.

        :param chunk: the chunk of data to write as a string object
        """
        while chunk:
            written = os.write(self.fd, chunk)
            self.upload_size += written
            chunk = chunk[written:]
            # For large files sync every 512MB (by default) written
            diff = self.upload_size - self.last_sync
            if diff >= self.disk_file.bytes_per_sync:
                tpool.execute(fdatasync, self.fd)
                drop_buffer_cache(self.fd, self.last_sync, diff)
                self.last_sync = self.upload_size
Example 28
    def update_deleted(self, job):
        """
	高层次的方法来复制一个不属于本结点的单partition
        High-level method that replicates a single partition that doesn't
        belong on this node.

        :param job: a dict containing info about the partition to be replicated
	包含要被复制的partition的信息的字典
        """

        def tpool_get_suffixes(path):
            return [suff for suff in os.listdir(path)
                    if len(suff) == 3 and isdir(join(path, suff))]
        self.replication_count += 1
        self.logger.increment('partition.delete.count.%s' % (job['device'],))
        begin = time.time()
        try:
            responses = []
            suffixes = tpool.execute(tpool_get_suffixes, job['path'])
            if suffixes:
                for node in job['nodes']:
                    success = self.rsync(node, job, suffixes)
                    if success:
                        with Timeout(self.http_timeout):
                            conn = http_connect(
                                node['replication_ip'],
                                node['replication_port'],
                                node['device'], job['partition'], 'REPLICATE',
                                '/' + '-'.join(suffixes), headers=self.headers)
                            conn.getresponse().read()
                    responses.append(success)
            if self.handoff_delete:
                # delete handoff if we have had handoff_delete successes
                delete_handoff = len([resp for resp in responses if resp]) >= \
                    self.handoff_delete
            else:
                # delete handoff if all syncs were successful
                delete_handoff = len(responses) == len(job['nodes']) and \
                    all(responses)
            if not suffixes or delete_handoff:
                self.logger.info(_("Removing partition: %s"), job['path'])
                tpool.execute(shutil.rmtree, job['path'], ignore_errors=True)
        except (Exception, Timeout):
            self.logger.exception(_("Error syncing handoff partition"))
        finally:
            self.partition_times.append(time.time() - begin)
            self.logger.timing_since('partition.delete.timing', begin)
Example 29
    def put(self, fd, tmppath, metadata, extension=''):
        
        if extension == '.ts':
            # TombStone marker (deleted)
            return True
        
        metadata[X_TYPE] = OBJECT
        
        if extension == '.meta':
            # Metadata recorded separately from the file
            self.meta_put_metadata(metadata)
            return True

        # Check if directory already exists.
        if self.is_dir:
            self.logger.error('Directory already exists %s/%s' %
                              (self.datadir, self.obj))
            return False

        meta_write_metadata(self.metafile, metadata)
        
        if X_CONTENT_LENGTH in metadata:
            self.drop_cache(fd, 0, int(metadata[X_CONTENT_LENGTH]))
        tpool.execute(os.fsync, fd)
        
        if self.obj_path:
            dir_objs = self.obj_path.split('/')
            tmp_path = ''
            if len(dir_objs):
                for dir_name in dir_objs:
                    if tmp_path:
                        tmp_path = tmp_path + '/' + dir_name
                    else:
                        tmp_path = dir_name
                    if not self.create_dir_object(
                            os.path.join(self.container_path, tmp_path)):
                        self.logger.error("Failed in subdir %s",
                                          os.path.join(self.container_path,
                                                       tmp_path))
                        return False

        renamer(tmppath, os.path.join(self.datadir,
                                      self.obj))
        
        do_chown(os.path.join(self.datadir, self.obj), self.uid, self.gid)
        self.metadata = metadata
        
        return True
Example 30
 def connect(cls, db_module, connect_timeout, *args, **kw):
     t = timeout.Timeout(connect_timeout, ConnectTimeout())
     try:
         from eventlet import tpool
         conn = tpool.execute(db_module.connect, *args, **kw)
         return tpool.Proxy(conn, autowrap_names=('cursor',))
     finally:
         t.cancel()
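The autowrap_names argument above makes the proxy wrap the return value of the named methods too, so objects produced by conn.cursor() also dispatch through the thread pool. A hedged sketch using sqlite3 (check_same_thread=False is needed because tpool runs calls in worker threads other than the one that created the connection):

import sqlite3
from eventlet import tpool

raw = sqlite3.connect(':memory:', check_same_thread=False)
conn = tpool.Proxy(raw, autowrap_names=('cursor',))

cur = conn.cursor()        # also a tpool.Proxy, thanks to autowrap_names
cur.execute('SELECT 1')    # runs in a native worker thread
print(cur.fetchone())      # -> (1,)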
Example 31
 def delete_partition(self, path):
     self.logger.info(_("Removing partition: %s"), path)
     tpool.execute(shutil.rmtree, path, ignore_errors=True)
Example 32
        with file.mkstemp() as (fd, tmppath):
            if 'content-length' in request.headers:
                fallocate(fd, int(request.headers['content-length']))
            reader = request.environ['wsgi.input'].read
            for chunk in iter(lambda: reader(self.network_chunk_size), ''):
                upload_size += len(chunk)
                if time.time() > upload_expiration:
                    self.logger.increment('PUT.timeouts')
                    return HTTPRequestTimeout(request=request)
                etag.update(chunk)
                while chunk:
                    written = os.write(fd, chunk)
                    chunk = chunk[written:]
                # For large files sync every 512MB (by default) written
                if upload_size - last_sync >= self.bytes_per_sync:
                    tpool.execute(os.fdatasync, fd)
                    drop_buffer_cache(fd, last_sync, upload_size - last_sync)
                    last_sync = upload_size
                sleep()

            if 'content-length' in request.headers and \
                    int(request.headers['content-length']) != upload_size:
                return HTTPClientDisconnect(request=request)
            etag = etag.hexdigest()
            if 'etag' in request.headers and \
                            request.headers['etag'].lower() != etag:
                return HTTPUnprocessableEntity(request=request)
            metadata = {
                'X-Timestamp': request.headers['x-timestamp'],
                'Content-Type': request.headers['content-type'],
                'ETag': etag,
Example 33
 def alternate_execute(self):
     if self.enabled:
         self.set_result(tpool.execute(self.override.call), stop_executing=True)
         return True
     return False
Example 34
def all_db(amount, start):
    return tpool.execute(
        execute,
        'SELECT id, nickname, score, status FROM users LIMIT ? OFFSET ?',
        [amount, start])
Example 35
    def put(self, fd, tmppath, metadata, extension=''):
        """
        Finalize writing the file on disk, and renames it from the temp file to
        the real location.  This should be called after the data has been
        written to the temp file.

        :param fd: file descriptor of the temp file
        :param tmppath: path to the temporary file being used
        :param metadata: dictionary of metadata to be written
        :param extension: extension to be used when making the file
        """
        if extension == '.ts':
            # TombStone marker (deleted)
            return True

        # Fix up the metadata to ensure it has a proper value for the
        # Content-Type metadata, as well as an X_TYPE and X_OBJECT_TYPE
        # metadata values.

        content_type = metadata['Content-Type']
        if not content_type:
            metadata['Content-Type'] = FILE_TYPE
            x_object_type = FILE
        else:
            x_object_type = (MARKER_DIR if content_type.lower() == DIR_TYPE
                             else FILE)
        metadata[X_TYPE] = OBJECT
        metadata[X_OBJECT_TYPE] = x_object_type

        if extension == '.meta':
            # Metadata recorded separately from the file
            self.put_metadata(metadata)
            return True

        extension = ''

        if metadata[X_OBJECT_TYPE] == MARKER_DIR:
            self.create_dir_object(os.path.join(self.datadir, self.obj))
            self.put_metadata(metadata)
            self.data_file = self.datadir + '/' + self.obj
            return True

        # Check if directory already exists.
        if self.is_dir:
            self.logger.error('Directory already exists %s/%s' %
                              (self.datadir, self.obj))
            return False

        timestamp = normalize_timestamp(metadata[X_TIMESTAMP])
        write_metadata(tmppath, metadata)
        if X_CONTENT_LENGTH in metadata:
            self.drop_cache(fd, 0, int(metadata[X_CONTENT_LENGTH]))
        tpool.execute(os.fsync, fd)
        if self.obj_path:
            dir_objs = self.obj_path.split('/')
            tmp_path = ''
            if len(dir_objs):
                for dir_name in dir_objs:
                    if tmp_path:
                        tmp_path = tmp_path + '/' + dir_name
                    else:
                        tmp_path = dir_name
                    if not self.create_dir_object(
                            os.path.join(self.container_path, tmp_path)):
                        self.logger.error("Failed in subdir %s",\
                                        os.path.join(self.container_path,tmp_path))
                        return False

        renamer(tmppath, os.path.join(self.datadir, self.obj + extension))
        do_chown(os.path.join(self.datadir, self.obj + extension),
                 self.uid, self.gid)
        self.metadata = metadata
        self.data_file = self.datadir + '/' + self.obj + extension
        return True
Example 36
__test__ = False

if __name__ == '__main__':
    import warnings
    from eventlet import tpool
    g = [False]

    def do():
        g[0] = True

    with warnings.catch_warnings(record=True) as ws:
        warnings.simplefilter('always', category=RuntimeWarning)

        tpool.execute(do)

        msgs = [str(w) for w in ws]
        assert len(ws) == 1, msgs
        msg = str(ws[0].message)
        assert 'Zero threads in tpool' in msg
        assert 'EVENTLET_THREADPOOL_SIZE' in msg

    assert g[0]
    print('pass')
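The test above asserts the warning eventlet emits when EVENTLET_THREADPOOL_SIZE is set to zero. The same variable sizes the pool; the pool is created lazily on first use, so the variable should be set before the first tpool.execute call. A small sketch:

import os
# Set before the first tpool.execute, which lazily creates the pool.
os.environ['EVENTLET_THREADPOOL_SIZE'] = '40'

from eventlet import tpool

tpool.execute(print, 'runs in one of 40 native worker threads')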
Example 37
def users_top(amount, start):
    return tpool.execute(
        execute,
        'SELECT nickname, score FROM users ORDER BY score DESC LIMIT ? OFFSET ?',
        [amount, start])
Example 38
    abort(404)


@APP.route('/check/<name>')
def check(name):
  """Run a check."""
  try:
    checkFun = checker(name)
  except KeyError, e:
    print e
    return abort(404)

  args = request.args.getlist('arg')
  args.insert(0, 'check_%s' % name)

  output = tpool.execute(checkFun, args)
  if GRAPHITE:
    try:
      parsed = nagios.parseResponse(output)
    except Exception, e: # ok to catch generic error # pylint: disable=W0703
      print 'During %s: %r' % (name, e)
      parsed = None

    if parsed and parsed[2]:
      for k, v in parsed[2].iteritems():
        if isinstance(v, (int, long, float)):
          parts = ['checkserver', name]
          parts.extend(args[1:])
          parts.append(k)
          GRAPHITE.enqueue('.'.join(parts), v)
      if not GRAPHITE.isAlive():
Example 39
def _gethostbyname_tpool(name):
    from eventlet import tpool
    return tpool.execute(
        __original_gethostbyname__, name)
Example 40
def find_meme(text):
    MAX_ANCESTOR_SEARCH = 3
    MAX_SIBLING_SEARCH = 15
    if text.find('"') != -1 or text.find("'") != -1:
        raise SearchQueryNotValidError(f"text {repr(text)} has quote inside.")
    for floor in floors:
        now = time.time()
        url = 'https://forum.gamer.com.tw/C.php?bsn=60076&snA=5491441'
        response = requests.get(url + f"&to={floor}")
        tree = etree.HTML(response.text)
        #xpath=f'//div[//font[contains(translate(@color, "ABCDEFGHIJKLMNOPQRSTUVWXYZ", "abcdefghijklmnopqrstuvwxyz"),"#ff0000")] and normalize-space(.)="{text}"][1]/ancestor-or-self::div/following-sibling::div//a[contains(@class,"photoswipe-image")][1]'
        xpath = f'//div[@id="BH-background"]/div[@id="BH-wrapper"]/div[@id="BH-master"]/section[contains(@class,"c-section")]/div[contains(@class,"c-section__main")]/div[contains(@class,"c-post__body")]/article/div[contains(@class,"c-article__content")]//div[//font[contains(translate(@color, "ABCDEFGHIJKLMNOPQRSTUVWXYZ", "abcdefghijklmnopqrstuvwxyz"),"#ff0000")] and normalize-space(.)="{text}"][1]/ancestor-or-self::div'
        print(xpath)

        ancestor_count = 0

        # Calling tree.xpath(xpath) inline would block the event loop. The
        # commented-out attempts in the original source tried
        # multiprocessing.ThreadPool, ThreadPoolExecutor, a raw Process with
        # a Queue, ProcessPoolExecutor, and a spawn-context Pool; the version
        # kept here hands the call to eventlet's tpool instead.
        q = Queue()

        def FindElementByXpath(q):
            results = tree.xpath(xpath)
            q.put(results)

        tpool.execute(FindElementByXpath, q)
        ancestors = q.get()

        ancestors.reverse()
        print('ancestors:', ancestors)
        for ancestor_div in ancestors:  #from descendant to ancestor
            if ancestor_count < MAX_ANCESTOR_SEARCH:
                ancestor_count += 1
            else:
                break

            xpath = "following-sibling::div"

            sibling_count = 0
            siblings = ancestor_div.xpath(xpath)
            print('siblings:', siblings)
            for sibling_div in siblings:
                if sibling_count < MAX_SIBLING_SEARCH:
                    sibling_count += 1
                else:
                    break

                xpath = 'descendant::a[contains(@class,"photoswipe-image")][1]'
                title = sibling_div.xpath(xpath)  #type(title) == list
                #pretty print: print(repr(etree.tostring(title[0],method='text',encoding='utf-8').decode('utf-8')))
                if title:  #'title' is [] if the meme is not found
                    #the correct tag is the first one in list([0]), hence getting the first element
                    print(
                        repr(
                            etree.tostring(
                                title[0], method='text',
                                encoding='utf-8').decode(
                                    'utf-8')))  #print the content of the tag
                    print('meme url:', meme_url := title[0].get('href'))
                    print(f'time spent on floor {floor}:',
                          time.time() - now, 's')
                    return meme_url
        print(f'time spent on floor {floor}:', time.time() - now, 's')
        # Temporarily yield the CPU so other waiting greenthreads can run.
        eventlet.sleep(0)
    return None
    """
    old version:
    for floor in floors:
        url='https://forum.gamer.com.tw/C.php?bsn=60076&snA=5491441'
        response=requests.get(url+f"&to={floor}")
        tree=etree.HTML(response.text)
        xpath=f'//div[//font[contains(translate(@color, "ABCDEFGHIJKLMNOPQRSTUVWXYZ", "abcdefghijklmnopqrstuvwxyz"),"#ff0000")] and normalize-space(.)="{text}"][1]/ancestor-or-self::div/following-sibling::div//a[contains(@class,"photoswipe-image")][1]'
        print(xpath)
        title = tree.xpath(xpath) #type(title) == list
        #print(repr(etree.tostring(title[0],method='text',encoding='utf-8').decode('utf-8')))
        if title:#'title' is [] if the meme is not found
            #the correct tag is the first one in list([0]), hence getting the first element
            print(repr(etree.tostring(title[0], method='text', encoding='utf-8').decode('utf-8')))#print the content of the tag
            print('meme url:', meme_url:=title[0].get('href'))
            return meme_url
    return None
    """


#find_meme('明天再來')
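Since tpool.execute returns the callable's return value directly, the Queue indirection in the example above is not actually needed; a simpler equivalent of the xpath offload (assuming lxml, as in the snippet):

from eventlet import tpool
from lxml import etree

tree = etree.HTML('<div><p>hi</p></div>')
# The xpath call runs in a native thread; its result comes back directly.
ancestors = tpool.execute(tree.xpath, '//p')
print(ancestors)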
Example 41
 def gt_fun():
     semaphore.send(None)
     tpool.execute(native_fun)
Example 42
 def wrapper(*args, **kwargs):
     from eventlet import tpool
     output = tpool.execute(original, *args, **kwargs)
     return output
Example 43
def Connection(*args, **kw):
    conn = tpool.execute(__orig_connections.Connection, *args, **kw)
    return tpool.Proxy(conn, autowrap_names=('cursor', ))
Example 44
 def _define_filter(self, xml):
     if callable(xml):
         xml = xml()
     # execute in a native thread and block current greenthread until done
     tpool.execute(self._conn.nwfilterDefineXML, xml)
Example 45
def execute(*cmd, **kwargs):
    """Helper method to shell out and execute a command through subprocess.

    Allows optional retry.

    :param cmd:             Passed to subprocess.Popen.
    :type cmd:              string
    :param cwd:             Set the current working directory
    :type cwd:              string
    :param process_input:   Send to opened process.
    :type process_input:    string or bytes
    :param env_variables:   Environment variables and their values that
                            will be set for the process.
    :type env_variables:    dict
    :param check_exit_code: Single bool, int, or list of allowed exit
                            codes.  Defaults to [0].  Raise
                            :class:`ProcessExecutionError` unless
                            program exits with one of these codes.
    :type check_exit_code:  boolean, int, or [int]
    :param delay_on_retry:  True | False. Defaults to True. If set to True,
                            wait a short amount of time before retrying.
    :type delay_on_retry:   boolean
    :param attempts:        How many times to retry cmd.
    :type attempts:         int
    :param run_as_root:     True | False. Defaults to False. If set to True,
                            the command is prefixed by the command specified
                            in the root_helper kwarg.
    :type run_as_root:      boolean
    :param root_helper:     command to prefix to commands called with
                            run_as_root=True
    :type root_helper:      string
    :param shell:           whether or not there should be a shell used to
                            execute this command. Defaults to false.
    :type shell:            boolean
    :param loglevel:        log level for execute commands.
    :type loglevel:         int.  (Should be logging.DEBUG or logging.INFO)
    :param log_errors:      Should stdout and stderr be logged on error?
                            Possible values are
                            :py:attr:`~.LogErrors.DEFAULT`,
                            :py:attr:`~.LogErrors.FINAL`, or
                            :py:attr:`~.LogErrors.ALL`. Note that the
                            values :py:attr:`~.LogErrors.FINAL` and
                            :py:attr:`~.LogErrors.ALL`
                            are **only** relevant when multiple attempts of
                            command execution are requested using the
                            ``attempts`` parameter.
    :type log_errors:       :py:class:`~.LogErrors`
    :param binary:          On Python 3, return stdout and stderr as bytes if
                            binary is True, as Unicode otherwise.
    :type binary:           boolean
    :param on_execute:      This function will be called upon process creation
                            with the object as an argument.  The purpose of this
                            is to allow the caller of `processutils.execute` to
                            track process creation asynchronously.
    :type on_execute:       function(:class:`subprocess.Popen`)
    :param on_completion:   This function will be called upon process
                            completion with the object as an argument.  The
                            purpose of this is to allow the caller of
                            `processutils.execute` to track process completion
                            asynchronously.
    :type on_completion:    function(:class:`subprocess.Popen`)
    :param preexec_fn:      This function will be called
                            in the child process just before the child
                            is executed. WARNING: On Windows, we silently
                            drop this preexec_fn as it is not supported by
                            subprocess.Popen on Windows (throws a
                            ValueError)
    :type preexec_fn:       function()
    :param prlimit:         Set resource limits on the child process. See
                            below for a detailed description.
    :type prlimit:          :class:`ProcessLimits`
    :param python_exec:     The python executable to use for enforcing
                            prlimits. If this is not set or is None, it will
                            default to use sys.executable.
    :type python_exec:      string
    :param timeout:         Timeout (in seconds) to wait for the process
                            termination. If timeout is reached,
                            :class:`subprocess.TimeoutExpired` is raised.
    :type timeout:          int
    :returns:               (stdout, stderr) from process execution
    :raises:                :class:`UnknownArgumentError` on
                            receiving unknown arguments
    :raises:                :class:`ProcessExecutionError`
    :raises:                :class:`OSError`
    :raises:                :class:`subprocess.TimeoutExpired`

    The *prlimit* parameter can be used to set resource limits on the child
    process.  If this parameter is used, the child process will be spawned by a
    wrapper process which will set limits before spawning the command.

    .. versionchanged:: 3.17
       *process_input* can now be either bytes or string on python3.

    .. versionchanged:: 3.4
       Added *prlimit* optional parameter.

    .. versionchanged:: 1.5
       Added *cwd* optional parameter.

    .. versionchanged:: 1.9
       Added *binary* optional parameter. On Python 3, *stdout* and *stderr*
       are now returned as Unicode strings by default, or bytes if *binary* is
       true.

    .. versionchanged:: 2.1
       Added *on_execute* and *on_completion* optional parameters.

    .. versionchanged:: 2.3
       Added *preexec_fn* optional parameter.
    """

    cwd = kwargs.pop('cwd', None)
    process_input = kwargs.pop('process_input', None)
    if process_input is not None:
        process_input = encodeutils.to_utf8(process_input)
    env_variables = kwargs.pop('env_variables', None)
    check_exit_code = kwargs.pop('check_exit_code', [0])
    ignore_exit_code = False
    delay_on_retry = kwargs.pop('delay_on_retry', True)
    attempts = kwargs.pop('attempts', 1)
    run_as_root = kwargs.pop('run_as_root', False)
    root_helper = kwargs.pop('root_helper', '')
    shell = kwargs.pop('shell', False)
    loglevel = kwargs.pop('loglevel', logging.DEBUG)
    log_errors = kwargs.pop('log_errors', None)
    if log_errors is None:
        log_errors = LogErrors.DEFAULT
    binary = kwargs.pop('binary', False)
    on_execute = kwargs.pop('on_execute', None)
    on_completion = kwargs.pop('on_completion', None)
    preexec_fn = kwargs.pop('preexec_fn', None)
    prlimit = kwargs.pop('prlimit', None)
    python_exec = kwargs.pop('python_exec', None) or sys.executable
    timeout = kwargs.pop('timeout', None)

    if isinstance(check_exit_code, bool):
        ignore_exit_code = not check_exit_code
        check_exit_code = [0]
    elif isinstance(check_exit_code, int):
        check_exit_code = [check_exit_code]

    if kwargs:
        raise UnknownArgumentError(_('Got unknown keyword args: %r') % kwargs)

    if isinstance(log_errors, int):
        log_errors = LogErrors(log_errors)
    if not isinstance(log_errors, LogErrors):
        raise InvalidArgumentError(_('Got invalid arg log_errors: %r') %
                                   log_errors)

    if run_as_root and hasattr(os, 'geteuid') and os.geteuid() != 0:
        if not root_helper:
            raise NoRootWrapSpecified(
                message=_('Command requested root, but did not '
                          'specify a root helper.'))
        if shell:
            # root helper has to be injected into the command string
            cmd = [' '.join((root_helper, cmd[0]))] + list(cmd[1:])
        else:
            # root helper has to be tokenized into argument list
            cmd = shlex.split(root_helper) + list(cmd)

    cmd = [str(c) for c in cmd]

    if prlimit:
        if os.name == 'nt':
            LOG.log(loglevel,
                    _('Process resource limits are ignored as '
                      'this feature is not supported on Windows.'))
        else:
            args = [python_exec, '-m', 'oslo_concurrency.prlimit']
            args.extend(prlimit.prlimit_args())
            args.append('--')
            args.extend(cmd)
            cmd = args

    sanitized_cmd = strutils.mask_password(' '.join(cmd))

    watch = timeutils.StopWatch()
    while attempts > 0:
        attempts -= 1
        watch.restart()

        try:
            LOG.log(loglevel, _('Running cmd (subprocess): %s'), sanitized_cmd)
            _PIPE = subprocess.PIPE  # pylint: disable=E1101

            if os.name == 'nt':
                on_preexec_fn = None
                close_fds = False
            else:
                on_preexec_fn = functools.partial(_subprocess_setup,
                                                  preexec_fn)
                close_fds = True

            obj = subprocess.Popen(cmd,
                                   stdin=_PIPE,
                                   stdout=_PIPE,
                                   stderr=_PIPE,
                                   close_fds=close_fds,
                                   preexec_fn=on_preexec_fn,
                                   shell=shell,  # nosec:B604
                                   cwd=cwd,
                                   env=env_variables)

            if on_execute:
                on_execute(obj)

            try:
                # eventlet.green.subprocess is not really greenthread friendly
                # on Windows. In order to avoid blocking other greenthreads,
                # we have to wrap this call using tpool.
                if eventlet_patched and os.name == 'nt':
                    result = tpool.execute(obj.communicate,
                                           process_input,
                                           timeout=timeout)
                else:
                    result = obj.communicate(process_input,
                                             timeout=timeout)

                obj.stdin.close()  # pylint: disable=E1101
                _returncode = obj.returncode  # pylint: disable=E1101
                LOG.log(loglevel, 'CMD "%s" returned: %s in %0.3fs',
                        sanitized_cmd, _returncode, watch.elapsed())
            except subprocess.TimeoutExpired:
                LOG.log(loglevel, 'CMD "%s" reached timeout in %0.3fs',
                        sanitized_cmd, watch.elapsed())
                raise
            finally:
                if on_completion:
                    on_completion(obj)

            if not ignore_exit_code and _returncode not in check_exit_code:
                (stdout, stderr) = result
                stdout = os.fsdecode(stdout)
                stderr = os.fsdecode(stderr)
                sanitized_stdout = strutils.mask_password(stdout)
                sanitized_stderr = strutils.mask_password(stderr)
                raise ProcessExecutionError(exit_code=_returncode,
                                            stdout=sanitized_stdout,
                                            stderr=sanitized_stderr,
                                            cmd=sanitized_cmd)
            if not binary and result is not None:
                (stdout, stderr) = result
                # Decode from the locale using the surrogateescape error
                # handler (decoding cannot fail).
                stdout = os.fsdecode(stdout)
                stderr = os.fsdecode(stderr)
                return (stdout, stderr)
            else:
                return result

        except (ProcessExecutionError, OSError) as err:
            # Log if we always log errors, or if this is the final
            # attempt and it failed.
            if log_errors == LOG_ALL_ERRORS or (
                    log_errors == LOG_FINAL_ERROR and not attempts):
                if isinstance(err, ProcessExecutionError):
                    format = _('%(desc)r\ncommand: %(cmd)r\n'
                               'exit code: %(code)r\nstdout: %(stdout)r\n'
                               'stderr: %(stderr)r')
                    LOG.log(loglevel, format, {"desc": err.description,
                                               "cmd": err.cmd,
                                               "code": err.exit_code,
                                               "stdout": err.stdout,
                                               "stderr": err.stderr})
                else:
                    format = _('Got an OSError\ncommand: %(cmd)r\n'
                               'errno: %(errno)r')
                    LOG.log(loglevel, format, {"cmd": sanitized_cmd,
                                               "errno": err.errno})

            if not attempts:
                LOG.log(loglevel, _('%r failed. Not Retrying.'),
                        sanitized_cmd)
                raise
            else:
                LOG.log(loglevel, _('%r failed. Retrying.'),
                        sanitized_cmd)
                if delay_on_retry:
                    time.sleep(random.randint(20, 200) / 100.0)
        finally:
            # NOTE(termie): this appears to be necessary to let the subprocess
            #               call clean something up in between calls; without
            #               it, the second of two execute calls in a row hangs
            # NOTE(bnemec): termie's comment above is probably specific to the
            #               eventlet subprocess module, but since we still
            #               have to support that we're leaving the sleep.  It
            #               won't hurt anything in the stdlib case anyway.
            time.sleep(0)
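This retry loop appears to be the tail of oslo.concurrency's processutils.execute(); a minimal, hypothetical usage sketch (command and keyword values are illustrative):

    from oslo_concurrency import processutils

    # Run 'ls -l /tmp', retrying up to three times with a short random
    # delay between attempts; any exit status other than 0 raises
    # ProcessExecutionError carrying the masked stdout/stderr.
    stdout, stderr = processutils.execute('ls', '-l', '/tmp',
                                          attempts=3,
                                          delay_on_retry=True,
                                          check_exit_code=[0])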
Example no. 46
def wrapper(*args, **kwargs):
    # 'f' is the callable captured by the enclosing decorator; run it in
    # a native thread when called from inside a greenthread.
    if (EVENTLET_NONBLOCKING_MODE_ENABLED
            and eventlet.getcurrent().parent):
        return tpool.execute(f, *args, **kwargs)
    else:
        return f(*args, **kwargs)
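A wrapper like this is normally the body of a decorator; a minimal sketch under that assumption (the flag is taken from the snippet, the decorator name nonblocking is hypothetical):

    import functools

    import eventlet
    from eventlet import tpool

    EVENTLET_NONBLOCKING_MODE_ENABLED = True  # assumed module-level flag


    def nonblocking(f):
        # Hypothetical decorator: offload 'f' to a native thread when the
        # caller is a greenthread, call it directly otherwise.
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            if (EVENTLET_NONBLOCKING_MODE_ENABLED
                    and eventlet.getcurrent().parent):
                return tpool.execute(f, *args, **kwargs)
            return f(*args, **kwargs)
        return wrapper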
Example no. 47
def blocking(method, *args, **kw):
    if Config.is_eventlet():
        return tpool.execute(method, *args, **kw)
    else:
        return method(*args, **kw)
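Call sites need no other changes; a hypothetical example (read_file is an assumed blocking helper):

    # Runs in a native thread under eventlet, synchronously otherwise.
    data = blocking(read_file, '/var/log/syslog')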
Example no. 48
    def call_xenapi(self, method, *args):
        """Call the specified XenAPI method on a background thread."""
        # Resolve a dotted name such as 'VM.get_record' attribute by
        # attribute, then invoke it via tpool so it cannot block the
        # event loop.
        f = self._session.xenapi
        for m in method.split('.'):
            f = getattr(f, m)
        return tpool.execute(f, *args)
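A hypothetical call site from elsewhere in the same class ('VM.get_record' is a standard XenAPI method; vm_ref is an assumed opaque reference):

        # Resolves session.xenapi.VM.get_record, then runs it in a
        # native thread via tpool.
        record = self.call_xenapi('VM.get_record', vm_ref)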
Example no. 49
def create_db():
    tpool.execute(
        execute,
        'CREATE TABLE IF NOT EXISTS users ('
        '  id TEXT PRIMARY KEY,'
        '  nickname TEXT NOT NULL,'
        '  score INTEGER DEFAULT 0,'
        '  status TEXT,'
        '  CHECK (status IN ("admin", "client")))',
        [])
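Both create_db() above and find_user() (Example no. 53 below) assume an execute(sql, params) helper; a minimal sqlite3-based sketch of what it might look like:

    import sqlite3

    # Hypothetical helper: a single shared connection, opened with
    # check_same_thread=False because tpool.execute runs it on native
    # worker threads. A real application would add its own locking or
    # one connection per thread.
    _conn = sqlite3.connect('app.db', check_same_thread=False)


    def execute(sql, params):
        cur = _conn.execute(sql, params)
        _conn.commit()
        return cur.fetchall()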
Example no. 50
    def async_call_plugin(self, plugin, fn, args):
        """Call Async.host.call_plugin on a background thread."""
        return tpool.execute(self._unwrap_plugin_exceptions,
                             self._session.xenapi.Async.host.call_plugin,
                             self.get_xenapi_host(), plugin, fn, args)
Example no. 51
    def download(self, context, image_id, data=None, dst_path=None):
        """Calls out to Glance for data and writes data."""
        if CONF.glance.allowed_direct_url_schemes and dst_path is not None:
            image = self.show(context, image_id, include_locations=True)
            for entry in image.get('locations', []):
                loc_url = entry['url']
                loc_meta = entry['metadata']
                o = urlparse.urlparse(loc_url)
                xfer_mod = self._get_transfer_module(o.scheme)
                if xfer_mod:
                    try:
                        xfer_mod.download(context, o, dst_path, loc_meta)
                        LOG.info("Successfully transferred using %s", o.scheme)
                        return
                    except Exception:
                        LOG.exception("Download image error")

        try:
            image_chunks = self._client.call(context, 2, 'data', image_id)
        except Exception:
            _reraise_translated_image_exception(image_id)

        # Retrieve properties for verification of Glance image signature
        verifier = None
        if CONF.glance.verify_glance_signatures:
            image_meta_dict = self.show(context, image_id,
                                        include_locations=False)
            image_meta = objects.ImageMeta.from_dict(image_meta_dict)
            img_signature = image_meta.properties.get('img_signature')
            img_sig_hash_method = image_meta.properties.get(
                'img_signature_hash_method'
            )
            img_sig_cert_uuid = image_meta.properties.get(
                'img_signature_certificate_uuid'
            )
            img_sig_key_type = image_meta.properties.get(
                'img_signature_key_type'
            )
            try:
                verifier = signature_utils.get_verifier(
                    context=context,
                    img_signature_certificate_uuid=img_sig_cert_uuid,
                    img_signature_hash_method=img_sig_hash_method,
                    img_signature=img_signature,
                    img_signature_key_type=img_sig_key_type,
                )
            except cursive_exception.SignatureVerificationError:
                with excutils.save_and_reraise_exception():
                    LOG.error('Image signature verification failed '
                              'for image: %s', image_id)

        close_file = False
        if data is None and dst_path:
            data = open(dst_path, 'wb')
            close_file = True

        if data is None:

            # Perform image signature verification
            if verifier:
                try:
                    for chunk in image_chunks:
                        verifier.update(chunk)
                    verifier.verify()

                    LOG.info('Image signature verification succeeded '
                             'for image: %s', image_id)

                except cryptography.exceptions.InvalidSignature:
                    with excutils.save_and_reraise_exception():
                        LOG.error('Image signature verification failed '
                                  'for image: %s', image_id)
            return image_chunks
        else:
            # WRS: offload image download to another thread to reduce chances
            #      of nova-compute getting stuck on disk IO
            def write_image(data, image_chunks, close_file, verifier):
                try:
                    for chunk in image_chunks:
                        if verifier:
                            verifier.update(chunk)
                        data.write(chunk)
                        # Without this, periodic tasks get delayed.
                        time.sleep(0)
                    if verifier:
                        verifier.verify()
                        LOG.info('Image signature verification succeeded '
                                 'for image %s', image_id)
                except cryptography.exceptions.InvalidSignature:
                    data.truncate(0)
                    with excutils.save_and_reraise_exception():
                        LOG.error('Image signature verification failed '
                                  'for image: %s', image_id)
                except Exception as ex:
                    with excutils.save_and_reraise_exception():
                        LOG.error("Error writing to %(path)s: "
                                  "%(exception)s",
                                  {'path': dst_path, 'exception': ex})
                finally:
                    if close_file:
                        # Ensure that the data is pushed all the way down to
                        # persistent storage. This ensures that in the event
                        # of a subsequent host crash we don't have running
                        # instances using a corrupt backing file.
                        data.flush()
                        os.fsync(data.fileno())
                        data.close()
            tpool.execute(write_image, data, image_chunks, close_file,
                          verifier)
Example no. 52
        last_sync = 0
        with file.mkstemp() as (fd, tmppath):
            if 'content-length' in request.headers:
                fallocate(fd, int(request.headers['content-length']))
            for chunk in iter(lambda: request.body_file.read(
                    self.network_chunk_size), ''):
                upload_size += len(chunk)
                if time.time() > upload_expiration:
                    return HTTPRequestTimeout(request=request)
                etag.update(chunk)
                while chunk:
                    written = os.write(fd, chunk)
                    chunk = chunk[written:]
                # For large files sync every 512MB (by default) written
                if upload_size - last_sync >= self.bytes_per_sync:
                    tpool.execute(os.fdatasync, fd)
                    drop_buffer_cache(fd, last_sync, upload_size - last_sync)
                    last_sync = upload_size

            if 'content-length' in request.headers and \
                    int(request.headers['content-length']) != upload_size:
                return Response(status='499 Client Disconnect')
            etag = etag.hexdigest()
            if 'etag' in request.headers and \
                            request.headers['etag'].lower() != etag:
                return HTTPUnprocessableEntity(request=request)
            metadata = {
                'X-Timestamp': request.headers['x-timestamp'],
                'Content-Type': request.headers['content-type'],
                'ETag': etag,
                'Content-Length': str(os.fstat(fd).st_size),
Example no. 53
def find_user(uid):
    return tpool.execute(
        execute,
        'SELECT id, nickname, score, status FROM users WHERE id=? LIMIT 1',
        [uid])[0]
Example no. 54
    def update_deleted(self, job):
        """
        High-level method that replicates a single partition that doesn't
        belong on this node.

        :param job: a dict containing info about the partition to be replicated
        """
        def tpool_get_suffixes(path):
            return [
                suff for suff in os.listdir(path)
                if len(suff) == 3 and isdir(join(path, suff))
            ]

        self.replication_count += 1
        self.logger.increment('partition.delete.count.%s' % (job['device'], ))
        self.headers['X-Backend-Storage-Policy-Index'] = int(job['policy'])
        begin = time.time()
        try:
            responses = []
            suffixes = tpool.execute(tpool_get_suffixes, job['path'])
            synced_remote_regions = {}
            delete_objs = None
            if suffixes:
                for node in job['nodes']:
                    kwargs = {}
                    if node['region'] in synced_remote_regions and \
                            self.conf.get('sync_method', 'rsync') == 'ssync':
                        kwargs['remote_check_objs'] = \
                            synced_remote_regions[node['region']]
                    # candidates is a dict(hash=>timestamp) of objects
                    # for deletion
                    success, candidates = self.sync(node, job, suffixes,
                                                    **kwargs)
                    if success:
                        with Timeout(self.http_timeout):
                            conn = http_connect(node['replication_ip'],
                                                node['replication_port'],
                                                node['device'],
                                                job['partition'],
                                                'REPLICATE',
                                                '/' + '-'.join(suffixes),
                                                headers=self.headers)
                            conn.getresponse().read()
                        if node['region'] != job['region']:
                            synced_remote_regions[node['region']] = \
                                candidates.keys()
                    responses.append(success)
                for region, cand_objs in synced_remote_regions.items():
                    if delete_objs is None:
                        delete_objs = cand_objs
                    else:
                        delete_objs = delete_objs.intersection(cand_objs)
            if self.handoff_delete:
                # delete handoff if we have had handoff_delete successes
                delete_handoff = len([resp for resp in responses if resp]) >= \
                    self.handoff_delete
            else:
                # delete handoff if all syncs were successful
                delete_handoff = len(responses) == len(job['nodes']) and \
                    all(responses)
            if delete_handoff:
                if (self.conf.get('sync_method', 'rsync') == 'ssync'
                        and delete_objs is not None):
                    self.logger.info(_("Removing %s objects"),
                                     len(delete_objs))
                    self.delete_handoff_objs(job, delete_objs)
                else:
                    self.delete_partition(job['path'])
            elif not suffixes:
                self.delete_partition(job['path'])
        except (Exception, Timeout):
            self.logger.exception(_("Error syncing handoff partition"))
        finally:
            self.partition_times.append(time.time() - begin)
            self.logger.timing_since('partition.delete.timing', begin)
Example no. 55
            except OSError:
                return HTTPInsufficientStorage(drive=device, request=request)
            reader = request.environ['wsgi.input'].read
            for chunk in iter(lambda: reader(self.network_chunk_size), ''):
                start_time = time.time()
                upload_size += len(chunk)
                if time.time() > upload_expiration:
                    self.logger.increment('PUT.timeouts')
                    return HTTPRequestTimeout(request=request)
                etag.update(chunk)
                while chunk:
                    written = os.write(fd, chunk)
                    chunk = chunk[written:]
                # For large files sync every 512MB (by default) written
                if upload_size - last_sync >= self.bytes_per_sync:
                    tpool.execute(fdatasync, fd)
                    drop_buffer_cache(fd, last_sync, upload_size - last_sync)
                    last_sync = upload_size
                sleep()
                elapsed_time += time.time() - start_time

            if upload_size:
                self.logger.transfer_rate('PUT.' + device + '.timing',
                                          elapsed_time, upload_size)

            if 'content-length' in request.headers and \
                    int(request.headers['content-length']) != upload_size:
                return HTTPClientDisconnect(request=request)
            etag = etag.hexdigest()
            if 'etag' in request.headers and \
                    request.headers['etag'].lower() != etag:
Example no. 56
    def update(self, job):
        """
        High-level method that replicates a single partition.

        :param job: a dict containing info about the partition to be replicated
        """
        self.replication_count += 1
        self.logger.increment('partition.update.count.%s' % (job['device'], ))
        begin = time.time()
        try:
            hashed, local_hash = tpool.execute(
                tpooled_get_hashes,
                job['path'],
                do_listdir=(self.replication_count % 10) == 0,
                reclaim_age=self.reclaim_age)
            # See tpooled_get_hashes "Hack".
            if isinstance(hashed, BaseException):
                raise hashed
            self.suffix_hash += hashed
            self.logger.update_stats('suffix.hashes', hashed)
            attempts_left = len(job['nodes'])
            nodes = itertools.chain(
                job['nodes'],
                self.object_ring.get_more_nodes(int(job['partition'])))
            while attempts_left > 0:
                # If this raises StopIteration it will be caught way below
                node = next(nodes)
                attempts_left -= 1
                try:
                    with Timeout(self.http_timeout):
                        resp = http_connect(node['ip'],
                                            node['port'],
                                            node['device'],
                                            job['partition'],
                                            'REPLICATE',
                                            '',
                                            headers={
                                                'Content-Length': '0'
                                            }).getresponse()
                        if resp.status == HTTP_INSUFFICIENT_STORAGE:
                            self.logger.error(
                                _('%(ip)s/%(device)s responded'
                                  ' as unmounted'), node)
                            attempts_left += 1
                            continue
                        if resp.status != HTTP_OK:
                            self.logger.error(
                                _("Invalid response %(resp)s "
                                  "from %(ip)s"), {
                                      'resp': resp.status,
                                      'ip': node['ip']
                                  })
                            continue
                        remote_hash = pickle.loads(resp.read())
                        del resp
                    suffixes = [
                        suffix for suffix in local_hash
                        if local_hash[suffix] != remote_hash.get(suffix, -1)
                    ]
                    if not suffixes:
                        continue
                    hashed, recalc_hash = tpool.execute(
                        tpooled_get_hashes,
                        job['path'],
                        recalculate=suffixes,
                        reclaim_age=self.reclaim_age)
                    # See tpooled_get_hashes "Hack".
                    if isinstance(hashed, BaseException):
                        raise hashed
                    self.logger.update_stats('suffix.hashes', hashed)
                    local_hash = recalc_hash
                    suffixes = [
                        suffix for suffix in local_hash
                        if local_hash[suffix] != remote_hash.get(suffix, -1)
                    ]
                    self.rsync(node, job, suffixes)
                    with Timeout(self.http_timeout):
                        conn = http_connect(node['ip'],
                                            node['port'],
                                            node['device'],
                                            job['partition'],
                                            'REPLICATE',
                                            '/' + '-'.join(suffixes),
                                            headers={'Content-Length': '0'})
                        conn.getresponse().read()
                    self.suffix_sync += len(suffixes)
                    self.logger.update_stats('suffix.syncs', len(suffixes))
                except (Exception, Timeout):
                    self.logger.exception(
                        _("Error syncing with node: %s") % node)
            self.suffix_count += len(local_hash)
        except (Exception, Timeout):
            self.logger.exception(_("Error syncing partition"))
        finally:
            self.partition_times.append(time.time() - begin)
            self.logger.timing_since('partition.update.timing', begin)
Example no. 57
def verify_glance_image_signature(context, image_service, image_id, path):
    verifier = None
    image_meta = image_service.show(context, image_id)
    image_properties = image_meta.get('properties', {})
    img_signature = image_properties.get('img_signature')
    img_sig_hash_method = image_properties.get('img_signature_hash_method')
    img_sig_cert_uuid = image_properties.get('img_signature_certificate_uuid')
    img_sig_key_type = image_properties.get('img_signature_key_type')
    if all(m is None for m in [
            img_signature, img_sig_cert_uuid, img_sig_hash_method,
            img_sig_key_type
    ]):
        # NOTE(tommylikehu): We won't verify the image signature
        # if none of the signature metadata presents.
        return False
    if any(m is None for m in [
            img_signature, img_sig_cert_uuid, img_sig_hash_method,
            img_sig_key_type
    ]):
        LOG.error('Image signature metadata for image %s is '
                  'incomplete.', image_id)
        raise exception.InvalidSignatureImage(image_id=image_id)

    try:
        verifier = signature_utils.get_verifier(
            context=context,
            img_signature_certificate_uuid=img_sig_cert_uuid,
            img_signature_hash_method=img_sig_hash_method,
            img_signature=img_signature,
            img_signature_key_type=img_sig_key_type,
        )
    except cursive_exception.SignatureVerificationError:
        message = _('Failed to get verifier for image: %s') % image_id
        LOG.error(message)
        raise exception.ImageSignatureVerificationException(reason=message)
    if verifier:
        with fileutils.remove_path_on_error(path):
            with open(path, "rb") as tem_file:
                try:
                    tpool.execute(_verify_image, tem_file, verifier)
                    LOG.info(
                        'Image signature verification succeeded '
                        'for image: %s', image_id)
                    return True
                except cryptography.exceptions.InvalidSignature:
                    message = _('Image signature verification '
                                'failed for image: %s') % image_id
                    LOG.error(message)
                    raise exception.ImageSignatureVerificationException(
                        reason=message)
                except Exception as ex:
                    message = _('Failed to verify signature for '
                                'image: %(image)s due to '
                                'error: %(error)s ') % {
                                    'image': image_id,
                                    'error': six.text_type(ex)
                                }
                    LOG.error(message)
                    raise exception.ImageSignatureVerificationException(
                        reason=message)
    return False
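The _verify_image helper handed to tpool.execute is not shown here; a plausible sketch (the chunk size is assumed):

    def _verify_image(image_file, verifier):
        # Hypothetical helper: stream the image through the verifier in
        # fixed-size chunks, then check the signature; verify() raises
        # cryptography.exceptions.InvalidSignature on mismatch.
        while True:
            chunk = image_file.read(1024 * 1024)  # 1 MiB, assumed
            if not chunk:
                break
            verifier.update(chunk)
        verifier.verify()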
Example no. 58
    def update_deleted(self, job):
        """
        High-level method that replicates a single partition that doesn't
        belong on this node.

        :param job: a dict containing info about the partition to be replicated
        """
        def tpool_get_suffixes(path):
            return [
                suff for suff in os.listdir(path)
                if len(suff) == 3 and isdir(join(path, suff))
            ]

        self.replication_count += 1
        self.logger.increment('partition.delete.count.%s' % (job['device'], ))
        headers = dict(self.default_headers)
        headers['X-Backend-Storage-Policy-Index'] = int(job['policy'])
        failure_devs_info = set()
        begin = time.time()
        handoff_partition_deleted = False
        try:
            responses = []
            suffixes = tpool.execute(tpool_get_suffixes, job['path'])
            synced_remote_regions = {}
            delete_objs = None
            if suffixes:
                for node in job['nodes']:
                    self.stats['rsync'] += 1
                    kwargs = {}
                    if node['region'] in synced_remote_regions and \
                            self.conf.get('sync_method', 'rsync') == 'ssync':
                        kwargs['remote_check_objs'] = \
                            synced_remote_regions[node['region']]
                    # candidates is a dict(hash=>timestamp) of objects
                    # for deletion
                    success, candidates = self.sync(node, job, suffixes,
                                                    **kwargs)
                    if success:
                        with Timeout(self.http_timeout):
                            conn = http_connect(node['replication_ip'],
                                                node['replication_port'],
                                                node['device'],
                                                job['partition'],
                                                'REPLICATE',
                                                '/' + '-'.join(suffixes),
                                                headers=headers)
                            conn.getresponse().read()
                        if node['region'] != job['region']:
                            synced_remote_regions[node['region']] = viewkeys(
                                candidates)
                    else:
                        failure_devs_info.add(
                            (node['replication_ip'], node['device']))
                    responses.append(success)
                for cand_objs in synced_remote_regions.values():
                    if delete_objs is None:
                        delete_objs = cand_objs
                    else:
                        delete_objs = delete_objs & cand_objs

            if self.handoff_delete:
                # delete handoff if we have had handoff_delete successes
                delete_handoff = len([resp for resp in responses if resp]) >= \
                    self.handoff_delete
            else:
                # delete handoff if all syncs were successful
                delete_handoff = len(responses) == len(job['nodes']) and \
                    all(responses)
            if delete_handoff:
                self.stats['remove'] += 1
                if (self.conf.get('sync_method', 'rsync') == 'ssync'
                        and delete_objs is not None):
                    self.logger.info(_("Removing %s objects"),
                                     len(delete_objs))
                    _junk, error_paths = self.delete_handoff_objs(
                        job, delete_objs)
                    # if replication works for a hand-off device and it failed,
                    # the remote devices which are target of the replication
                    # from the hand-off device will be marked. Because cleanup
                    # after replication failed means replicator needs to
                    # replicate again with the same info.
                    if error_paths:
                        failure_devs_info.update([
                            (failure_dev['replication_ip'],
                             failure_dev['device'])
                            for failure_dev in job['nodes']
                        ])
                else:
                    self.delete_partition(job['path'])
                    handoff_partition_deleted = True
            elif not suffixes:
                self.delete_partition(job['path'])
                handoff_partition_deleted = True
        except (Exception, Timeout):
            self.logger.exception(_("Error syncing handoff partition"))
            self._add_failure_stats(failure_devs_info)
        finally:
            target_devs_info = set([(target_dev['replication_ip'],
                                     target_dev['device'])
                                    for target_dev in job['nodes']])
            self.stats['success'] += len(target_devs_info - failure_devs_info)
            if not handoff_partition_deleted:
                self.handoffs_remaining += 1
            self.partition_times.append(time.time() - begin)
            self.logger.timing_since('partition.delete.timing', begin)
Example no. 59
    def __enter__(self):
        # Eventlet does not work with multiprocessing's Semaphore, so we
        # have to execute it in a native thread to avoid getting blocked
        # when trying to acquire the semaphore.
        return tpool.execute(self.semaphore.__enter__)
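A matching __exit__ would complete the context manager; a hypothetical counterpart (releasing a multiprocessing semaphore does not block, so no tpool hop is needed on the way out):

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Forward the release to the wrapped semaphore directly.
        return self.semaphore.__exit__(exc_type, exc_val, exc_tb)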
Example no. 60
def update_score(uid, diff):
    tpool.execute(execute, 'UPDATE users SET score=score+? WHERE id=?',
                  [diff, uid])