Ejemplo n.º 1
0
def fetch_snapshot_url(token):
    """Return the snapshot URL of the first camera on a Nest account.

    Args:
        token: OAuth bearer token for the Nest API.

    Returns:
        The camera's ``snapshot_url`` string.

    Raises:
        APIError: if the account has no devices, no cameras, or the
            camera exposes no snapshot URL field.
    """
    headers = {
        'Authorization': "Bearer {0}".format(token),
    }
    req = urllib2.Request(nest_api_url, None, headers)
    response = urllib2.urlopen(req)
    data = json.loads(response.read())

    # Verify the account has devices
    if 'devices' not in data:
        raise APIError(error_result("Nest account has no devices"))
    devices = data["devices"]

    # Verify the account has cameras
    if 'cameras' not in devices:
        raise APIError(error_result("Nest account has no cameras"))
    cameras = devices["cameras"]

    # Verify the account has at least one Nest Cam (empty mapping is falsy)
    if not cameras:
        raise APIError(error_result("Nest account has no cameras"))

    # next(iter(...)) picks an arbitrary camera id and works on both
    # Python 2 and Python 3; dict.keys()[0] fails on Python 3 views.
    camera_id = next(iter(cameras))
    camera = cameras[camera_id]

    # Verify the Nest Cam has a Snapshot URL field
    if 'snapshot_url' not in camera:
        raise APIError(error_result("Camera has no snapshot URL"))

    return camera["snapshot_url"]
Ejemplo n.º 2
0
def login():
    """Authenticate a user from the JSON request body.

    Returns:
        JSON response describing the authenticated user.

    Raises:
        APIError: 409 when credentials are missing or incorrect.
    """
    credentials = request.json
    username = credentials.get('username')
    password = credentials.get('password')

    # Both fields are required before hitting the database.
    if username is None or password is None:
        raise APIError('Missing username or password', status_code=409)

    authenticated = login_user(mongo, username, password)
    if not authenticated:
        raise APIError('Incorrect username/password', status_code=409)

    return jsonify(authenticated)
Ejemplo n.º 3
0
def new_user():
    """Create a user from JSON fields username/password/permission.

    Returns:
        JSON response describing the created user.

    Raises:
        APIError: 409 when a field is missing, or with the validation
            message when user creation fails.
    """
    body = request.json
    username = body.get('username')
    password = body.get('password')
    permission = body.get('permission')  # expected range: 0-2

    if any(value is None for value in (username, password, permission)):
        raise APIError('Missing username, password or permission', status_code=409)

    try:
        created = create_user(mongo, username, password, permission)
    except AssertionError as exc:
        # create_user signals validation failure via AssertionError.
        raise APIError(exc.args[0])

    return jsonify(created)
Ejemplo n.º 4
0
    def count_data(self, search, phases=None, **kwargs):
        """Return the number of entries matching *search*.

        Args:
            search: (dict) Search query like {"categ_A": "val_A", "categ_B": "val_B"},
                documented at https://developer.mpds.io/#Categories
            phases: (list) Phase IDs, according to the MPDS distinct phases concept
            kwargs: accepted for interface compatibility only, ignored

        Returns:
            count (int)

        Raises:
            APIError: when the server reports an error.
        """
        response = self._request(search, phases=phases, pagesize=10)

        error = response['error']
        if error:
            raise APIError(error, response.get('code', 0))

        # A result set spanning more pages than maxnpages hints that the
        # caller should raise maxnpages before fetching the data.
        if response['npages'] > self.maxnpages:
            warnings.warn(
                "\r\nDataset is too big, you may risk to change maxnpages from %s to %s" % \
                (self.maxnpages, int(math.ceil(response['count']/self.pagesize)))
            )

        return response['count']
Ejemplo n.º 5
0
async def api_register_user(*, email, name, passwd):
    """Register a new user and return a JSON response with a session cookie.

    Raises:
        APIValueError: on a missing or malformed name, email or passwd.
        APIError: when the email is already registered.
    """
    if not name or not name.strip():
        raise APIValueError('name')
    if not email or not _RE_EMAIL.match(email):
        raise APIValueError('email')
    if not passwd or not _RE_SHA1.match(passwd):
        raise APIValueError('passwd')

    existing = await User.findAll('email=?', [email])
    if existing:
        raise APIError('register:failed', 'email', 'Email is already in use.')

    uid = next_id()
    # Stored password is SHA1("<uid>:<client-side hashed passwd>").
    salted = '%s:%s' % (uid, passwd)
    avatar = 'http://www.gravatar.com/avatar/%s?d=mm&s=120' % \
        hashlib.md5(email.encode('utf-8')).hexdigest()
    user = User(id=uid,
                name=name.strip(),
                email=email,
                passwd=hashlib.sha1(salted.encode('utf-8')).hexdigest(),
                image=avatar)
    await user.save()

    # make session cookie:
    resp = web.Response()
    resp.set_cookie(COOKIE_NAME,
                    user2cookie(user, 86400),
                    max_age=86400,
                    httponly=True)
    # Never echo the password hash back to the client.
    user.passwd = '******'
    resp.content_type = 'application/json'
    resp.body = json.dumps(user, ensure_ascii=False).encode('utf-8')
    return resp
Ejemplo n.º 6
0
def apierr_from_msg(code, err_msg="Error"):
    """Build an APIError combining the code, message and extra help text.

    Args:
        code: numeric error code; also used to look up the help message.
        err_msg: short human-readable description of the failure.

    Returns:
        APIError instance (constructed, not raised).
    """
    # The help text comes from our own lookup table, not from the API.
    help_msg = get_error_msg_help(code, '')
    message = "{0}: {1}. {2}".format(str(code), err_msg, help_msg)
    return APIError(code, error_result(message))
Ejemplo n.º 7
0
def api_ceate_user(*, name, password):
    """Create a user. Request url: [POST /api/users]

    Post data:
        name: user name
        password: user password

    Raises:
        APIValueError: on a missing/blank name or password.
        APIError: when the user name already exists.
    """
    if not name or not name.strip():
        raise APIValueError('name')
    if not password or not password.strip():
        raise APIValueError('password')

    name = name.strip()
    digest = hashlib.sha1(password.strip().encode('utf-8')).hexdigest()
    # NOTE(review): name is interpolated straight into the WHERE clause --
    # possible SQL injection; should use a parameterized query.
    existing = yield from User.findall(where="name='%s'" % name)
    if len(existing) > 0:
        raise APIError(errors.EUSER_ALREADY_EXISTS, 'User %s already exist' % name)

    user = User(id=uuid.uuid4().hex, name=name, password=digest)
    yield from user.save()
    yield from log_event(logging.INFO, event_user, event_action_add,
                         'Add user %s' % name)
    return dict(retcode=0, user=user)
    def _request(self, url, data):
        """POST *data* to *url* and return the decoded "data" payload.

        Raises:
            APIError: when the response body carries a "message" field.
        """
        raw = post(url, data).content.decode("utf8")
        payload = loads(raw)

        # A "message" key in the body marks a server-side error.
        message = payload.get("message")
        if message:
            raise APIError(message)

        return payload["data"]
Ejemplo n.º 9
0
def forward(path):
    """Proxy the incoming request to the service mapped to the path prefix.

    Args:
        path: request path; its first segment selects the target service.

    Returns:
        Flask response mirroring the downstream JSON body and status code.

    Raises:
        APIError: when no service is mapped to the path prefix.
    """
    app.logger.info('Forwarding route: %s method: %s' % (path, request.method))

    prefix = path.split('/')[0]
    if prefix not in mapping:
        raise APIError('Path not found in mapping', 401)

    body, status = mapping[prefix].make_request(
            path, request.method, request.data, copy_headers())

    response = jsonify(body)
    response.status_code = status
    return response
Ejemplo n.º 10
0
def send_and_decode(req):
    """
    Send the request and return the decoded json of response.

    Args:
        req: urllib2.Request

    Returns:
        A dict of decoded response

    Raises:
        APIError: with (error, message) from the server's JSON error body
            when available, otherwise with (status code, reason).
    """
    try:
        with contextlib.closing(urlopen(req)) as res:
            raw = res.read()
            return json.loads(raw)
    except HTTPError as res:  # 'except HTTPError, res' is invalid syntax on Python 3
        raw = res.read()
        try:
            data = json.loads(raw)
        except ValueError:
            # Error body was not JSON: fall back to the HTTP status line.
            raise APIError(res.code, res.reason)
        else:
            raise APIError(data["error"], data["message"])
Ejemplo n.º 11
0
def api_delete_user(*, id):
    """Delete a user by id. Request url: [POST /api/users/{id}/delete]

    Raises:
        APIResourceNotFoundError: when no user has this id.
        APIError: when attempting to delete the admin account.
    """
    target = yield from User.find(id)
    if target is None:
        raise APIResourceNotFoundError(id)

    # The built-in admin account must never be removed.
    if target.name == 'admin':
        raise APIError(errors.EUSER_CANNOT_DELETE_ADMIN, 'User %s can not be deleted' % target.name)

    yield from target.remove()
    yield from log_event(logging.INFO, event_user, event_action_del,
                         'Delete user %s' % target.name)
    return dict(retcode=0, id=id)
Ejemplo n.º 12
0
def remove_url_from_indexd_record(uuid, urls, indexclient):
    """
    Strip the given urls (and their metadata) from an indexd record.

    Args:
        uuid(str): did
        urls(list): list of urls
        indexclient: indexd client used to fetch and patch the record

    Raises:
        APIError: when the patched record cannot be saved.
    """
    record = indexclient.get(uuid)
    if record is None:
        # Nothing to do for an unknown did.
        return

    for url in urls:
        if url in record.urls:
            record.urls.remove(url)
        if url in record.urls_metadata:
            del record.urls_metadata[url]

    try:
        record.patch()
    except Exception as e:
        raise APIError(
            "INDEX_CLIENT: Can not update the record with uuid {}. Detail {}"
            .format(uuid, e))
Ejemplo n.º 13
0
    def _massage(self, array, fields):
        """Project each raw entry onto the requested fields.

        Args:
            array: list of raw entry dicts, each carrying an 'object_type'
                of 'S', 'P' or 'C'.
            fields: mapping of entry type to a list of either compiled
                jmespath expressions or literal values; falsy means no-op.

        Returns:
            *array* unchanged when *fields* is falsy, otherwise a list of
            per-entry lists of extracted values.

        Raises:
            APIError: on an entry whose type is not 'S', 'P' or 'C'.
        """
        if not fields:
            return array

        massaged = []

        for entry in array:
            entry_type = entry['object_type']
            if entry_type not in ('S', 'P', 'C'):
                raise APIError("API error: unknown entry type")

            picked = []
            for expr in fields.get(entry_type, []):
                # Compiled jmespath expressions are evaluated against the
                # entry; anything else is passed through as a literal.
                if isinstance(expr, jmespath.parser.ParsedResult):
                    picked.append(expr.search(entry))
                else:
                    picked.append(expr)

            massaged.append(picked)

        return massaged
Ejemplo n.º 14
0
def api_register_user(*, email, name, password):
    """Register a user, set session cookies and return the user as JSON.

    Raises:
        APIValueError: on a missing/malformed name, password or email.
        APIError: when the email is already registered.
    """
    if not name or not name.strip():
        raise APIValueError('name')
    if not password or not _re_sha1.match(password):
        raise APIValueError('password')
    if not email or not _re_email.match(email):
        raise APIValueError('email')

    # Reject emails that are already registered.
    existing = yield from User.find_all('email=?', [email])
    if existing:
        raise APIError('register failed ', 'email',
                       'The email has already existed')

    uid = next_id()
    # Stored password is SHA1("<uid>:<client-side hashed password>").
    salted_password = '%s:%s' % (uid, password)
    # A bundled static avatar is used instead of the gravatar service.
    user = User(id=uid,
                name=name.strip(),
                email=email,
                password=hashlib.sha1(
                    salted_password.encode('utf-8')).hexdigest(),
                image='/static/images/default-user.jpg')
    yield from user.save()

    r = web.Response()
    # Extra non-sensitive cookie carrying base64("name-email"),
    # deliberately readable by client-side scripts (httponly=False).
    marker = base64.b64encode(('%s-%s' % (user.name, user.email)).encode(encoding="utf-8"))
    r.set_cookie('FakeCookie',
                 marker.decode('utf-8'),
                 max_age=86400,
                 httponly=False)
    r.set_cookie(COOKIE_NAME,
                 user2cookie(user, 86400),
                 max_age=86400,
                 httponly=True)
    r.body = json.dumps(user, ensure_ascii=False).encode('utf-8')
    return r
Ejemplo n.º 15
0
    def compile_crystal(datarow, flavor='pmg'):
        """
        Represent an MPDS crystal structure in one of two flavors:
        a Pymatgen Structure object, or an ASE Atoms object.

        Attention #1. Disordered structures (e.g. fractional indices in the
        chemical formulae) are not supported here, so occupancies are not
        retrieved; it is up to the user to handle them (see e.g.
        https://doi.org/10.1186/s13321-016-0129-3).

        Attention #2. The pmg and ase flavors are generally not compatible:
        e.g. primitive vs. crystallographic cell is defaulted, atoms may be
        wrapped or not wrapped into the unit cell, etc.

        Note that crystal structures are not retrieved by default; request
        the fields 'cell_abc', 'sg_n', 'basis_noneq' and 'els_noneq',
        e.g. {'S': ['cell_abc', 'sg_n', 'basis_noneq', 'els_noneq']}.

        Args:
            datarow: (list) Required data to construct the structure:
                [..., cell_abc, sg_n, basis_noneq, els_noneq]
            flavor: (str) Either "pmg", or "ase"

        Returns:
            - Pymatgen Structure object, if flavor is pmg
            - ASE Atoms object, if flavor is ase
            - None, if the row carries no atomic basis
        """
        if not datarow or not datarow[-1]:
            # Either a P-entry holding only cell data that matched the
            # search, or a low-quality structure lacking an atomic basis.
            return None

        if len(datarow) < 4:
            raise ValueError(
                "Must supply a data row that ends with the entries "
                "'cell_abc', 'sg_n', 'basis_noneq', 'els_noneq'")

        cell_abc = datarow[-4]
        sg_n = int(datarow[-3])
        basis_noneq = datarow[-2]
        els_noneq = _massage_atsymb(datarow[-1])

        if flavor == 'pmg' and use_pmg:
            return Structure.from_spacegroup(
                sg_n,
                Lattice.from_parameters(*cell_abc),
                els_noneq,
                basis_noneq
            )

        if flavor == 'ase' and use_ase:
            atoms = [Atom(els_noneq[idx], tuple(xyz))
                     for idx, xyz in enumerate(basis_noneq)]
            return crystal(
                atoms,
                spacegroup=sg_n,
                cellpar=cell_abc,
                primitive_cell=True,
                onduplicates='replace'
            )

        raise APIError("Crystal structure treatment unavailable")
Ejemplo n.º 16
0
def test_custom_error():
    """Endpoint that always fails with 403, for exercising the error handler."""
    raise APIError(403, 'This is a custom error')
Ejemplo n.º 17
0
def update_url(fi, indexclient, provider="s3"):
    """
    Update (or create) an indexd record so it carries the file's bucket url
    and an ACL in sync with the file info.

    Args:
        fi(dict): file info; keys used here: "id", "file_name", "acl",
            "md5", "size"
        indexclient: indexd client used to get/patch/create records
        provider(str): url scheme; "s3" selects the AWS bucket name,
            anything else the Google bucket name

    Returns:
        True when an existing record was patched (or already up to date),
        or when a new record was created

    Raises:
        APIError: when the bucket name cannot be resolved, or when indexd
            rejects the update/creation.
    """
    # Resolve the destination bucket and object key for this file.
    try:
        if provider == "s3":
            bucket_name = utils.get_aws_bucket_name(fi, PROJECT_ACL)
        else:
            bucket_name = utils.get_google_bucket_name(fi, PROJECT_ACL)
        s3_object_name = "{}/{}".format(fi.get("id"), fi.get("file_name"))
    except UserError as e:
        raise APIError(
            "Can not get the bucket name of the record with uuid {}. Detail {}"
            .format(fi.get("id", ""), e))

    url = "{}://{}/{}".format(provider, bucket_name, s3_object_name)

    try:
        doc = indexclient.get(fi.get("id", ""))

        if doc is not None:
            need_update = False
            if url not in doc.urls:
                # Drop any stale variant of this url before appending the
                # current one.
                doc, _ = _remove_changed_url(doc, url)
                doc.urls.append(url)
                need_update = True

            # fi["acl"] is a stringified Python list, e.g. "[u'open']" or
            # "['phs000123', ...]"; normalize it into a clean list.
            if fi.get("acl") in {"[u'open']", "['open']"}:
                acl = ["*"]
            else:
                L = fi.get("acl")[1:-1].split(",")
                acl = []
                for ace in L:
                    ace = ace.strip()
                    if ace.startswith("u'"):
                        # strip the u'...' wrapper left by repr() on Python 2
                        ace = ace[2:-1]
                    acl.append(ace)

            if doc.acl != acl:
                doc.acl = acl
                need_update = True

            if need_update:
                doc.patch()
            return doc is not None
        # doc is None: fall through to record creation below.
    except Exception as e:
        # Don't break for any reason
        raise APIError(
            "INDEX_CLIENT: Can not update the record with uuid {}. Detail {}".
            format(fi.get("id", ""), e))

    # No existing record: create one pointing at both the GDC API and the
    # bucket url.
    urls = ["https://api.gdc.cancer.gov/data/{}".format(fi.get("id", "")), url]
    # NOTE(review): unlike the patch path above, this normalization does not
    # strip whitespace or u'...' wrappers from the ACL entries -- confirm
    # whether that asymmetry is intentional.
    acl = (["*"] if fi.get("acl") in {"[u'open']", "['open']"} else
           fi.get("acl")[1:-1].split(","))
    try:
        doc = indexclient.create(
            did=fi.get("id"),
            hashes={"md5": fi.get("md5")},
            size=fi.get("size", 0),
            acl=acl,
            urls=urls,
        )
        return doc is not None
    except Exception as e:
        # Don't break for any reason
        raise APIError(
            "INDEX_CLIENT: Can not create the record with uuid {}. Detail {}".
            format(fi.get("id", ""), e))
def packing_algorithm(unordered_items, useable_boxes, max_weight,
                      zone=None):
    '''
    from items provided, and boxes available, pack boxes with items

    - returns a dictionary of boxes with an 2D array of items packed
        in each parcel
    Args:
        unordered_items (List[ItemTuple])
        useable_boxes (List(Dict[{
            'dimensions': List(int, int, int)
            'box': ShippingBox
        }]))
        max_weight (Int)
        zone (Int?)

    Raises:
        BoxError when no box could fit some SKU.
        APIError when a single SKU alone already exceeds max_weight.

    Example:
    >>> packing_algorithm([item1, item2], [], {item1: 1, item2: 3}, True)
    {
        'package': (box=<best_standard_box object>,
                    items_per_box= [[ItemTuple, ItemTuple], [ItemTuple, ItemTuple]],
                    last_parcel=<smaller_box object>),
        'flat_rate': (box=<best_flat_rate object>,
                      items_per_box=[[ItemTuple], [ItemTuple, ItemTuple, ItemTuple]],
                      last_parcel=None)
    }

    Note: useable_boxes refers to boxes that you already know are big enough to
        fit at least ONE of each of the items. If you send in a box that is too
        small, you will be stuck in an infinite loop.
    '''
    packed_boxes = {}
    # sort items by longest dimension, longest first
    items_to_pack = sorted(unordered_items, key=lambda item: item.dimensions[2],
                          reverse=True)
    # pack the biggest items first then progressively pack the smaller ones
    for box_dict in useable_boxes:
        box = box_dict['box']
        packed_items = pack_boxes(box_dict['dimensions'], items_to_pack)
        # additional box starts as the last parcel

        # Overweight parcels are relieved by moving items into overflow
        # ("additional") boxes of the same box type.
        additional_boxes = []
        additional_box = []
        for items in packed_items:
            # if the weight of the contents of the box are greater than the
            # given max weight
            while sum(item.weight for item in items) + box.weight_g > max_weight:
                if len(items) == 1:
                    # A single item already over the limit can never ship.
                    raise APIError('SKU is too heavy: {}'
                                   .format(items[0].item_number))

                # TODO: Instead of removing the last SKU, remove the lightest
                # SKU, or the SKU that is closest in weight to the difference
                # between the current weight and the maximum weight.
                popped_item = items.pop()

                if ((sum(item.weight for item in additional_box) +
                        float(popped_item.weight) + box.weight_g) > max_weight):
                    # if the additional box weight + the last item is more than
                    # the max weight, start a new box
                    additional_boxes.append(additional_box)
                    additional_box = []

                additional_box.append(popped_item)

        # Flush the in-progress overflow box, if any items landed in it.
        if len(additional_box) > 0:
            additional_boxes.append(additional_box)

        packed_items += additional_boxes

        packed_boxes[box_dict['box']] = packed_items

    box_dictionary = setup_box_dictionary(packed_boxes, zone)

    # repack the last parcel into a smaller box
    if (box_dictionary['package'] is not None and
            len(box_dictionary['package'].items_per_box) > 1):
        package = box_dictionary['package']
        # repack the last parcels, see if they should go in a smaller box
        smallest_items_to_pack = package.items_per_box[-1]
        for box_dict in useable_boxes:
            # using non-flat rate boxes and those already smaller than the
            # currently set box
            smaller_box = box_dict['box']
            if (smaller_box.total_cubic_cm < package.box.total_cubic_cm):
                packed_items = pack_boxes(box_dict['dimensions'],
                                         smallest_items_to_pack)
                # Only accept the smaller box if everything fits in ONE parcel.
                if len(packed_items) == 1:
                    box_dictionary['package'] = package._replace(
                        last_parcel=smaller_box)
                    break

    return box_dictionary
Ejemplo n.º 19
0
    def get_data(self, search, phases=None, fields=default_fields):
        """
        Retrieve data in JSON.
        JSON is expected to be valid against the schema
        at https://developer.mpds.io/mpds.schema.json

        Args:
            search: (dict) Search query like {"categ_A": "val_A", "categ_B": "val_B"},
                documented at https://developer.mpds.io/#Categories
            phases: (list) Phase IDs, according to the MPDS distinct phases concept
            fields: (dict) Data of interest for C-, S-, and P-entries,
                e.g. for phase diagrams: {'C': ['naxes', 'arity', 'shapes']},
                documented at https://developer.mpds.io/#JSON-schemata

        Returns:
            List of dicts: C-, S-, and P-entries, the format is
            documented at https://developer.mpds.io/#JSON-schemata

        Raises:
            APIError: on a server-side error, on too many hits, or when
                the collected hit counts are inconsistent.
        """
        output = []
        # Pre-compile string field selectors into jmespath expressions;
        # non-string entries are assumed callable and invoked here.
        fields = {
            key: [jmespath.compile(item) if isinstance(item, str) else item() for item in value]
            for key, value in fields.items()
        } if fields else None

        tot_count = 0

        # De-duplicate phases, then split them into chunks no larger than
        # maxnphases so each request stays within the API limit.
        phases = list(set(phases)) if phases else []

        if len(phases) > self.maxnphases:
            all_phases = array_split(phases, int(math.ceil(
                len(phases)/self.maxnphases
            )))
        else: all_phases = [phases]

        nsteps = len(all_phases)

        for step, current_phases in enumerate(all_phases, start=1):

            counter, hits_count = 0, 0

            # Page through the results for this chunk of phases.
            while True:
                result = self._request(search, phases=list(current_phases), page=counter)
                if result['error']:
                    raise APIError(result['error'], result.get('code', 0))

                if result['npages'] > self.maxnpages:
                    raise APIError(
                        "Too many hits (%s > %s), please, be more specific" % \
                        (result['count'], self.maxnpages * self.pagesize),
                        2
                    )
                output.extend(self._massage(result['out'], fields))

                # The server-reported total must stay stable across pages.
                if hits_count and hits_count != result['count']:
                    raise APIError("API error: hits count has been changed during the query")

                hits_count = result['count']

                # Throttle between page requests to be polite to the API.
                time.sleep(self.chillouttime)

                if counter == result['npages'] - 1:
                    break

                counter += 1

                if self.verbose:
                    sys.stdout.write("\r\t%d%% of step %s from %s" % (
                        (counter/result['npages']) * 100, step, nsteps)
                                    )
                    sys.stdout.flush()

            tot_count += hits_count

        # Sanity check: everything the server promised must be collected.
        if len(output) != tot_count:
            raise APIError("API error: collected and declared counts of hits differ")

        if self.verbose:
            sys.stdout.write(" Got %s hits\r\n" % tot_count)
            sys.stdout.flush()

        return output