def chunk_fetch(self, volume_id, start_after=None, limit=None,
                rebuild=False):
    result = []
    if start_after is not None:
        start_after = start_after.encode('utf8')
    incident_date = self.admin_get_incident_date(volume_id)
    if rebuild and incident_date is None:
        # No incident date set, so no chunks need to be rebuilt
        return result
    db_iter = self._get_db_chunk(volume_id).iterator(
        start=start_after, include_start=False)
    count = 0
    for key, value in db_iter:
        if limit is not None and count >= limit:
            break
        data = json.loads(value)
        if rebuild:
            if data.get('rtime'):
                continue  # already rebuilt
            mtime = data.get('mtime')
            if int(mtime) > incident_date:
                continue  # chunk pushed after the incident
        result.append((key, data))
        count += 1
    return result

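# A minimal paging sketch, not part of the original code: it walks every
# chunk of a volume through the chunk_fetch() method above, assuming
# `backend` is an instance of the class defining it. The helper name and
# the page size are hypothetical.
def iter_all_chunks(backend, volume_id, page_size=1000):
    start_after = None
    while True:
        page = backend.chunk_fetch(volume_id, start_after=start_after,
                                   limit=page_size)
        for key, data in page:
            yield key, data
        if len(page) < page_size:
            break
        # Reuse the last returned key as the next starting point
        start_after = page[-1][0]
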
def chunk_push(self, volume_id, container_id, content_id, chunk_id,
               **data):
    key = "%s|%s|%s" % (container_id, content_id, chunk_id)
    value = self._get_db_chunk(volume_id).get(key.encode('utf8'))
    if value is not None:
        value = json.loads(value)
    else:
        value = dict()

    for k, v in data.iteritems():
        value[k] = v

    if 'mtime' not in value:  # not consistent
        if 'rtime' in value:
            # In functional tests, we can encounter the case where the
            # rebuild update (rtime) arrives before the creation update
            # (first mtime)
            value['mtime'] = value['rtime']
        else:
            raise ServerException("mtime is mandatory")

    value = json.dumps(value)
    self._get_db_chunk(volume_id).put(key.encode('utf8'),
                                      value.encode('utf8'))

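# A hedged usage sketch for chunk_push() above: all identifiers below are
# made-up placeholders, and `backend` is assumed to be an instance of the
# class defining the method. 'mtime' must be provided (or 'rtime', from
# which it is derived), otherwise ServerException is raised.
backend.chunk_push('127.0.0.1:6010',
                   container_id='0123456789ABCDEF',
                   content_id='FEDCBA9876543210',
                   chunk_id='AAAAAAAAAAAAAAAA',
                   mtime=1234567890)
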
def chunk_status(self, volume_id):
    total_chunks = 0
    total_chunks_rebuilt = 0
    incident_date = self.admin_get_incident_date(volume_id)
    containers = dict()
    for key, value in self._get_db_chunk(volume_id):
        total_chunks += 1
        container, content, chunk = key.split('|')
        try:
            containers[container]['total'] += 1
        except KeyError:
            containers[container] = {'total': 1}
            if incident_date is not None:
                containers[container]['rebuilt'] = 0
        data = json.loads(value)
        rtime = data.get('rtime')
        if rtime is not None:
            total_chunks_rebuilt += 1
            containers[container]['rebuilt'] += 1

    result = {
        'chunk': {'total': total_chunks},
        'container': containers
    }
    if incident_date is not None:
        result['rebuild'] = {'incident_date': incident_date}
        result['chunk']['rebuilt'] = total_chunks_rebuilt
    return result

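# For reference, a sketch of the structure chunk_status() returns when an
# incident date is set; all figures and the container id are illustrative,
# not real output.
example_status = {
    'chunk': {'total': 42, 'rebuilt': 7},
    'container': {
        '0123456789ABCDEF': {'total': 42, 'rebuilt': 7},
    },
    'rebuild': {'incident_date': 1234567890},
}
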
def safe_decode_job(self, job_id, data):
    try:
        env = json.loads(data)
        env['job_id'] = job_id
        return env
    except Exception as exc:
        self.logger.warn('decoding job "%s"', str(exc.message))
        return None

def on_account_update(self, req):
    account_id = self._get_account_id(req)
    decoded = json.loads(req.get_data())
    metadata = decoded.get("metadata")
    to_delete = decoded.get("to_delete")
    success = self.backend.update_account_metadata(
        account_id, metadata, to_delete)
    if success:
        return Response(status=204)
    return NotFound("Account not found")

def admin_clear(self, volume_id, clear_all):
    db = self._get_db_chunk(volume_id)
    count = 0
    for key, value in db:
        if not clear_all:
            data = json.loads(value)
        # Delete everything when clear_all is set, otherwise only the
        # entries that have been rebuilt (the 'or' short-circuits, so
        # 'data' is only read when it has been loaded above)
        if clear_all or 'rtime' in data:
            count += 1
            db.delete(key)
    self._get_db_admin(volume_id).delete('incident_date')
    return count

def on_account_container_update(self, req):
    account_id = self._get_account_id(req)
    d = json.loads(req.get_data())
    name = d.get("name")
    mtime = d.get("mtime")
    dtime = d.get("dtime")
    object_count = d.get("objects")
    bytes_used = d.get("bytes")
    info = self.backend.update_container(account_id, name, mtime, dtime,
                                         object_count, bytes_used)
    result = json.dumps(info)
    return Response(result)

def on_account_container_reset(self, req):
    account_id = self._get_account_id(req)
    data = json.loads(req.get_data())
    name = data.get('name')
    mtime = data.get('mtime')
    dtime = None
    object_count = 0
    bytes_used = 0
    # Exceptions are caught by dispatch_request
    self.backend.update_container(
        account_id, name, mtime, dtime, object_count, bytes_used,
        autocreate_container=False)
    return Response(status=204)

def on_rdir_delete(self, req):
    volume = self._get_volume(req)
    decoded = json.loads(req.get_data())
    chunk_id = decoded.get('chunk_id')
    if chunk_id is None:
        return BadRequest('Missing token chunk_id')
    container_id = decoded.get('container_id')
    if container_id is None:
        return BadRequest('Missing token container_id')
    content_id = decoded.get('content_id')
    if content_id is None:
        return BadRequest('Missing token content_id')
    self.backend.chunk_delete(volume, container_id, content_id, chunk_id)
    return Response(status=204)

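# A sketch of the JSON body on_rdir_delete() expects, built with the
# module-level json import already used above; the hex identifiers are
# placeholders, not real ids.
delete_body = json.dumps({
    'chunk_id': 'AAAAAAAAAAAAAAAA',
    'container_id': '0123456789ABCDEF',
    'content_id': 'FEDCBA9876543210',
})
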
def on_rdir_push(self, req):
    volume = self._get_volume(req)
    decoded = json.loads(req.get_data())
    data = self._check_push(decoded)
    try:
        self.backend.chunk_push(volume, **data)
    except NoSuchDB:
        if req.args.get('create'):
            self.backend.create(volume)
            self.backend.chunk_push(volume, **data)
        else:
            return NotFound('No such volume')
    return Response(status=204)

def on_rdir_admin_lock(self, req):
    volume = self._get_volume(req)
    decoded = json.loads(req.get_data())
    who = decoded.get('who')
    if who is None:
        return BadRequest('Missing token who')
    desc = self.backend.admin_lock(volume, who)
    if desc is not None:
        message = "Already locked by %s" % desc
        return Response(message, 403)
    return Response(status=204)

def on_rdir_admin_incident(self, req):
    volume = self._get_volume(req)
    if req.method == 'POST':
        decoded = json.loads(req.get_data())
        date = decoded.get('date')
        if date is None or not isinstance(date, int):
            return BadRequest('Missing date or bad format')
        self.backend.admin_set_incident_date(volume, date)
        return Response(status=204)
    else:
        date = self.backend.admin_get_incident_date(volume)
        resp = {}
        if date:
            resp = {'date': date}
        return Response(json.dumps(resp), mimetype='application/json')

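# A sketch of the two ways on_rdir_admin_incident() is used: POST sets the
# incident date as an integer timestamp, GET reads it back. The timestamp
# is illustrative.
set_incident_body = json.dumps({'date': 1234567890})
# The GET branch answers either '{}' or '{"date": 1234567890}',
# with mimetype application/json.
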
def on_rdir_admin_clear(self, req):
    volume = self._get_volume(req)
    decoded = json.loads(req.get_data())
    clear_all = decoded.get('all', False)
    if not isinstance(clear_all, bool):
        return BadRequest('"all" must be true or false')
    lock = self.backend.admin_lock(volume, 'admin_clear')
    if lock is not None:
        return Response("Already locked by %s" % lock, 403)
    nb = self.backend.admin_clear(volume, clear_all)
    self.backend.admin_unlock(volume)
    resp = {'removed': nb}
    return Response(json.dumps(resp), mimetype='application/json')

def chunk_push(self, volume_id, container_id, content_id, chunk_id,
               **data):
    key = "%s|%s|%s" % (container_id, content_id, chunk_id)
    value = self._get_db_chunk(volume_id).get(key.encode('utf8'))
    if value is not None:
        value = json.loads(value)
    else:
        value = dict()

    for k, v in data.iteritems():
        value[k] = v

    if 'mtime' not in value:  # not consistent
        raise ServerException("mtime is mandatory")

    value = json.dumps(value)
    self._get_db_chunk(volume_id).put(key.encode('utf8'),
                                      value.encode('utf8'))

def on_rdir_fetch(self, req):
    volume = self._get_volume(req)
    pretty = req.args.get('pretty')
    decoded = json.loads(req.get_data())
    start_after = decoded.get('start_after')
    limit = decoded.get('limit')
    if limit is not None and limit <= 0:
        return BadRequest('limit must be greater than 0')
    rebuild = decoded.get('rebuild', False)
    if not isinstance(rebuild, bool):
        return BadRequest('rebuild must be true or false')

    data = self.backend.chunk_fetch(volume, start_after=start_after,
                                    limit=limit, rebuild=rebuild)

    if pretty:
        body = json.dumps(data, indent=4)
    else:
        body = json.dumps(data)

    return Response(body, mimetype='application/json')

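# A sketch of the request body on_rdir_fetch() accepts; all values below
# are illustrative. 'start_after' and 'limit' page through the listing,
# and 'rebuild' restricts it to chunks that still need a rebuild.
fetch_body = json.dumps({
    'start_after': '0123456789ABCDEF|FEDCBA9876543210|AAAAAAAAAAAAAAAA',
    'limit': 100,
    'rebuild': True,
})
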
def load(self, json_mapping=None):
    """
    Load the mapping from the cluster,
    from a JSON string or from a dictionary.
    """
    if isinstance(json_mapping, basestring):
        raw_mapping = json.loads(json_mapping)
    elif isinstance(json_mapping, dict):
        raw_mapping = json_mapping
    else:
        raw_mapping = self.m0.list()

    for pfx, services_addrs in raw_mapping.iteritems():
        services = list()
        for svc_addr in services_addrs:
            svc = self.services.get(svc_addr, {"addr": svc_addr})
            services.append(svc)
        for svc in services:
            pfx_set = svc.get("prefixes", set([]))
            pfx_set.add(pfx)
            svc["prefixes"] = pfx_set
        self.svc_by_pfx[pfx] = services

def load(self, json_mapping=None):
    """
    Load the mapping from the cluster,
    from a JSON string or from a dictionary.
    """
    if isinstance(json_mapping, basestring):
        raw_mapping = json.loads(json_mapping)
    elif isinstance(json_mapping, dict):
        raw_mapping = json_mapping
    else:
        raw_mapping = self.m0.list()  # pylint: disable=no-member

    for pfx, services_addrs in raw_mapping.iteritems():
        services = list()
        # FIXME: this is REALLY annoying
        # self.prefix_to_base() takes the beginning of the prefix,
        # but here we have to take the end, because meta0 does
        # some byte swapping.
        base = pfx[4-self.digits:]
        for svc_addr in services_addrs:
            svc = self.services.get(svc_addr, {"addr": svc_addr})
            services.append(svc)
        self.assign_services(base, services)

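# A sketch of the kind of JSON mapping load() accepts: each prefix maps to
# the addresses of the services holding it. The prefixes and addresses are
# placeholders, not a real deployment.
example_mapping = json.dumps({
    '0000': ['127.0.0.1:6020', '127.0.0.2:6020'],
    '0001': ['127.0.0.3:6020'],
})
# mapping.load(example_mapping) would then register each service under
# every prefix it serves.
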
def safe_decode_job(self, job):
    try:
        return json.loads(job)
    except Exception as e:
        self.logger.warn('ERROR decoding job "%s"', str(e.message))
        return None

def safe_decode_msg(self, msg):
    try:
        return json.loads(msg[1])
    except Exception as e:
        self.logger.warn('ERROR decoding msg "%s"', str(e.message))
        return None

def _direct_request(self, method, url, headers=None, data=None, json=None,
                    params=None, admin_mode=False, pool_manager=None,
                    **kwargs):
    """
    Make an HTTP request.

    :param method: HTTP method to use (e.g. "GET")
    :type method: `str`
    :param url: URL to request
    :type url: `str`
    :keyword admin_mode: allow operations on slave or worm namespaces
    :type admin_mode: `bool`
    :keyword timeout: optional timeout for the request (in seconds).
        May be a `urllib3.Timeout(connect=connection_timeout,
        read=read_timeout)`.
        This method also accepts `connection_timeout` and `read_timeout`
        as separate arguments.
    :type timeout: `float` or `urllib3.Timeout`
    :keyword headers: optional headers to add to the request
    :type headers: `dict`

    :raise oio.common.exceptions.OioTimeout: in case of read, write
        or connection timeout
    :raise oio.common.exceptions.OioNetworkException: in case of
        connection error
    :raise oio.common.exceptions.OioException: in other case of HTTP error
    :raise oio.common.exceptions.ClientException: in case of HTTP status
        code >= 400
    """
    # Filter arguments that are not recognized by Requests
    out_kwargs = {k: v for k, v in kwargs.items()
                  if k in URLLIB3_REQUESTS_KWARGS}

    # Ensure headers are all strings
    if headers:
        out_headers = {k: str(v) for k, v in headers.items()}
    else:
        out_headers = dict()
    if self.admin_mode or admin_mode:
        out_headers[ADMIN_HEADER] = '1'

    # Ensure there is a timeout
    if 'timeout' not in out_kwargs:
        out_kwargs['timeout'] = urllib3.Timeout(
            connect=kwargs.get('connection_timeout', CONNECTION_TIMEOUT),
            read=kwargs.get('read_timeout', READ_TIMEOUT))

    # Convert json and add Content-Type
    if json:
        out_headers["Content-Type"] = "application/json"
        data = jsonlib.dumps(json)

    out_kwargs['headers'] = out_headers
    out_kwargs['body'] = data

    # Add query string
    if params:
        out_param = []
        for k, v in params.items():
            if v is not None:
                if isinstance(v, unicode):
                    v = unicode(v).encode('utf-8')
                out_param.append((k, v))
        encoded_args = urlencode(out_param)
        url += '?' + encoded_args

    if not pool_manager:
        pool_manager = self.pool_manager

    try:
        resp = pool_manager.request(method, url, **out_kwargs)
        body = resp.data
        if body:
            try:
                body = jsonlib.loads(body)
            except ValueError:
                pass
    except MaxRetryError as exc:
        if isinstance(exc.reason, NewConnectionError):
            raise exceptions.OioNetworkException(exc), None, \
                sys.exc_info()[2]
        if isinstance(exc.reason, TimeoutError):
            raise exceptions.OioTimeout(exc), None, sys.exc_info()[2]
        raise exceptions.OioNetworkException(exc), None, sys.exc_info()[2]
    except (ProtocolError, ProxyError, ClosedPoolError) as exc:
        raise exceptions.OioNetworkException(exc), None, sys.exc_info()[2]
    except TimeoutError as exc:
        raise exceptions.OioTimeout(exc), None, sys.exc_info()[2]
    except HTTPError as exc:
        raise exceptions.OioException(exc), None, sys.exc_info()[2]
    if resp.status >= 400:
        raise exceptions.from_response(resp, body)
    return resp, body

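# A hedged example of calling _direct_request(); the URL and payload are
# placeholders, and `client` is assumed to be an instance of the class
# defining the method. Passing json= sets the Content-Type and serializes
# the body; connection_timeout/read_timeout are folded into a
# urllib3.Timeout when no explicit timeout is given.
resp, body = client._direct_request(
    'POST', 'http://127.0.0.1:6000/v3.0/example',
    json={'key': 'value'},
    params={'param': 'value'},
    connection_timeout=2.0, read_timeout=30.0)
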
def decode_msg(msg):
    return json.loads(msg[1])

def container_create_many(self, account, containers, properties=None,
                          **kwargs):
    """
    Create several containers.

    :param account: account in which to create the containers
    :type account: `str`
    :param containers: names of the containers
    :type containers: iterable of `str`
    :param properties: properties to set on the containers
    :type properties: `dict`
    :keyword headers: extra headers to send to the proxy
    :type headers: `dict`
    """
    results = list()
    try:
        params = self._make_params(account)
        headers = gen_headers()
        headers.update(kwargs.get('headers') or {})
        unformatted_data = list()
        for container in containers:
            unformatted_data.append({'name': container,
                                     'properties': properties or {},
                                     'system': kwargs.get('system', {})})
        data = json.dumps({"containers": unformatted_data})
        resp, body = self._request('POST', '/create_many', params=params,
                                   data=data, headers=headers)
        if resp.status_code not in (204, 201):
            raise exceptions.from_response(resp, body)
        for container in json.loads(body)["containers"]:
            results.append((container["name"],
                            container["status"] == 201))
        return results
    except exceptions.TooLarge:
        # Batch too large for the proxy
        pivot = len(containers) / 2
        head = containers[:pivot]
        tail = containers[pivot:]
        if head:
            results += self.container_create_many(
                account, head, properties=properties,
                headers=headers, **kwargs)
        if tail:
            results += self.container_create_many(
                account, tail, properties=properties,
                headers=headers, **kwargs)
        return results
    except exceptions.NotFound:
        # Batches not supported by the proxy
        for container in containers:
            try:
                rc = self.container_create(
                    account, container, properties=properties,
                    headers=headers, **kwargs)
                results.append((container, rc))
            except Exception:
                results.append((container, False))
        return results

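# A usage sketch for container_create_many(); the account and container
# names are placeholders, and `client` is assumed to be an instance of the
# class defining the method. Each result pairs a container name with a
# boolean telling whether it was actually created (HTTP 201).
results = client.container_create_many(
    'my_account', ['container-1', 'container-2'],
    properties={'color': 'blue'})
for name, created in results:
    print('%s created: %s' % (name, created))
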
def _parse_stats_json(body):
    """Prefix each entry with 'stat.'"""
    body = json.loads(body)
    return {'stat.' + k: body[k] for k in body.keys()}

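# A quick illustrative check of what _parse_stats_json() produces; the
# stat name is a made-up example, not a real service metric.
assert _parse_stats_json('{"cnx.active": 1}') == {'stat.cnx.active': 1}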