Example #1
 def form_valid(self, form):
     product_ids_to_quantities = dict(
         (int(key.replace("q_", "")), value)
         for (key, value)
         in six.iteritems(form.cleaned_data)
         if key.startswith("q_") and value > 0
     )
     order = self.object
     product_map = Product.objects.in_bulk(set(product_ids_to_quantities.keys()))
     products_to_quantities = dict(
         (product_map[product_id], quantity)
         for (product_id, quantity)
         in six.iteritems(product_ids_to_quantities)
     )
     try:
         shipment = order.create_shipment(
             supplier=form.cleaned_data["supplier"],
             product_quantities=products_to_quantities
         )
     except NoProductsToShipException:
         messages.error(self.request, _("No products to ship."))
         return self.form_invalid(form)
     else:
         messages.success(self.request, _("Shipment %s created.") % shipment.id)
         return HttpResponseRedirect(get_model_url(order))
Example #2
    def logger_file(self, value):
        """
        Sets the logger_file.

        If the logger_file is None, then add stream handler and remove file handler.
        Otherwise, add file handler and remove stream handler.

        :param value: The logger_file path.
        :type value: str
        """
        self.__logger_file = value
        if self.__logger_file:
            # If set logging file,
            # then add file handler and remove stream handler.
            self.logger_file_handler = logging.FileHandler(self.__logger_file)
            self.logger_file_handler.setFormatter(self.logger_formatter)
            for _, logger in iteritems(self.logger):
                logger.addHandler(self.logger_file_handler)
                if self.logger_stream_handler:
                    logger.removeHandler(self.logger_stream_handler)
        else:
            # If not set logging file,
            # then add stream handler and remove file handler.
            self.logger_stream_handler = logging.StreamHandler()
            self.logger_stream_handler.setFormatter(self.logger_formatter)
            for _, logger in iteritems(self.logger):
                logger.addHandler(self.logger_stream_handler)
                if self.logger_file_handler:
                    logger.removeHandler(self.logger_file_handler)
Example #3
    def to_dict(self):
        """
        Serialize the DSL object to plain dict
        """
        d = {}
        for pname, value in iteritems(self._params):
            pinfo = self._param_defs.get(pname)

            # typed param
            if pinfo and 'type' in pinfo:
                # don't serialize empty lists and dicts for typed fields
                if value in ({}, []):
                    continue

                # multi-values are serialized as list of dicts
                if pinfo.get('multi'):
                    value = list(map(lambda x: x.to_dict(), value))

                # squash all the hash values into one dict
                elif pinfo.get('hash'):
                    value = dict((k, v.to_dict()) for k, v in iteritems(value))

                # serialize single values
                else:
                    value = value.to_dict()

            # serialize anything with to_dict method
            elif hasattr(value, 'to_dict'):
                value = value.to_dict()

            d[pname] = value
        return {self.name: d}
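For context, here is a minimal, hypothetical sketch of the pattern the method above implements (the toy `Term` class is an assumption, not part of the snippet's library):

from six import iteritems

class Term(object):
    # Hypothetical DSL object mirroring the serialization pattern above.
    name = 'term'

    def __init__(self, **params):
        self._params = params

    def to_dict(self):
        d = {}
        for pname, value in iteritems(self._params):
            # serialize anything with a to_dict method
            if hasattr(value, 'to_dict'):
                value = value.to_dict()
            d[pname] = value
        return {self.name: d}

print(Term(field='title', boost=Term(value=2)).to_dict())
# {'term': {'field': 'title', 'boost': {'term': {'value': 2}}}}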
Example #4
    def record_derivatives(self, derivs, metadata):
        """Writes the derivatives that were calculated for the driver.

        Args
        ----
        derivs : dict
            Dictionary containing derivatives

        metadata : dict
            Dictionary containing execution metadata (e.g. iteration coordinate).
        """

        iteration_coordinate = metadata['coord']
        timestamp = metadata['timestamp']

        write = self.out.write
        fmat = "Timestamp: {0!r}\n"
        write(fmat.format(timestamp))

        fmat = "Iteration Coordinate: {0:s}/Derivs\n"
        write(fmat.format(format_iteration_coordinate(iteration_coordinate)))

        write("Derivatives:\n")
        if isinstance(derivs, dict):
            for okey, sub in sorted(iteritems(derivs)):
                for ikey, deriv in sorted(iteritems(sub)):
                    write("  {0} wrt {1}: {2}\n".format(okey, ikey, str(deriv)))
        else:
            write("  {0} \n".format(str(derivs)))

        # Flush once per iteration to allow external scripts to process the data.
        self.out.flush()
Example #5
    def __call_api(self, resource_path, method,
                   path_params=None, query_params=None, header_params=None,
                   body=None, post_params=None, files=None,
                   response_type=None, auth_settings=None, callback=None):

        # headers parameters
        header_params = header_params or {}
        header_params.update(self.default_headers)
        if self.cookie:
            header_params['Cookie'] = self.cookie
        if header_params:
            header_params = self.sanitize_for_serialization(header_params)

        # path parameters
        if path_params:
            path_params = self.sanitize_for_serialization(path_params)
            for k, v in iteritems(path_params):
                replacement = quote(str(self.to_path_value(v)))
                resource_path = resource_path.\
                    replace('{' + k + '}', replacement)

        # query parameters
        if query_params:
            query_params = self.sanitize_for_serialization(query_params)
            query_params = {k: self.to_path_value(v)
                            for k, v in iteritems(query_params)}

        # post parameters
        if post_params or files:
            post_params = self.prepare_post_parameters(post_params, files)
            post_params = self.sanitize_for_serialization(post_params)

        # auth setting
        self.update_params_for_auth(header_params, query_params, auth_settings)

        # body
        if body:
            body = self.sanitize_for_serialization(body)

        # request url
        url = self.host + resource_path

        # perform request and return response
        response_data = self.request(method, url,
                                     query_params=query_params,
                                     headers=header_params,
                                     post_params=post_params, body=body)

        self.last_response = response_data

        # deserialize response data
        if response_type:
            deserialized_data = self.deserialize(response_data, response_type)
        else:
            deserialized_data = None

        if callback:
            callback(deserialized_data)
        else:
            return deserialized_data
Example #6
    def _translate_section(self, section, sub_section, data, mapping):
        cfn_objects = {}
        obj_name = section[:-1]
        err_msg = _('"%%s" is not a valid keyword inside a %s '
                    'definition') % obj_name
        for name, attrs in six.iteritems(data):
            cfn_object = {}

            if not attrs:
                args = {'object_name': obj_name, 'sub_section': sub_section}
                message = _('Each %(object_name)s must contain a '
                            '%(sub_section)s key.') % args
                raise exception.StackValidationFailed(message=message)
            try:
                for attr, attr_value in six.iteritems(attrs):
                    cfn_attr = self._translate(attr, mapping, err_msg)
                    cfn_object[cfn_attr] = attr_value

                cfn_objects[name] = cfn_object
            except AttributeError:
                message = _('"%(section)s" must contain a map of '
                            '%(obj_name)s maps. Found a [%(_type)s] '
                            'instead') % {'section': section,
                                          '_type': type(attrs),
                                          'obj_name': obj_name}
                raise exception.StackValidationFailed(message=message)
            except KeyError as e:
                # an invalid keyword was found
                raise exception.StackValidationFailed(message=six.text_type(e))

        return cfn_objects
Example #7
    def test_driver_records_unknown_types_metadata(self):

        prob = Problem()
        root = prob.root = Group()

        # Need an optimization problem to test to make sure
        #   the is_desvar, is_con, is_obj metadata is being
        #   recorded for the Unknowns
        root.add('p1', IndepVarComp('x', 50.0))
        root.add('p2', IndepVarComp('y', 50.0))
        root.add('comp', Paraboloid())

        root.connect('p1.x', 'comp.x')
        root.connect('p2.y', 'comp.y')

        prob.driver = ScipyOptimizer()
        prob.driver.options['optimizer'] = 'SLSQP'
        prob.driver.add_desvar('p1.x', lower=-50.0, upper=50.0)
        prob.driver.add_desvar('p2.y', lower=-50.0, upper=50.0)

        prob.driver.add_objective('comp.f_xy')
        prob.driver.options['disp'] = False

        prob.driver.add_recorder(self.recorder)
        self.recorder.options['record_metadata'] = True
        prob.setup(check=False)
        prob.cleanup() # close recorders

        expected_params = list(iteritems(prob.root.params))
        expected_unknowns = list(iteritems(prob.root.unknowns))
        expected_resids = list(iteritems(prob.root.resids))

        self.assertMetadataRecorded((expected_params, expected_unknowns, expected_resids))
Example #8
def parse(functions, stack, snippet, path='', template=None):
    recurse = functools.partial(parse, functions, stack, template=template)

    if isinstance(snippet, collections.Mapping):
        def mkpath(key):
            return '.'.join([path, six.text_type(key)])

        if len(snippet) == 1:
            fn_name, args = next(six.iteritems(snippet))
            Func = functions.get(fn_name)
            if Func is not None:
                try:
                    path = '.'.join([path, fn_name])
                    if issubclass(Func, function.Macro):
                        return Func(stack, fn_name, args,
                                    functools.partial(recurse, path=path),
                                    template)
                    else:
                        return Func(stack, fn_name, recurse(args, path))
                except (ValueError, TypeError, KeyError) as e:
                    raise exception.StackValidationFailed(
                        path=path,
                        message=six.text_type(e))

        return dict((k, recurse(v, mkpath(k)))
                    for k, v in six.iteritems(snippet))
    elif (not isinstance(snippet, six.string_types) and
          isinstance(snippet, collections.Iterable)):

        def mkpath(idx):
            return ''.join([path, '[%d]' % idx])

        return [recurse(v, mkpath(i)) for i, v in enumerate(snippet)]
    else:
        return snippet
Example #9
def _resolve_with_state(unconflicted_state_ids, conflicted_state_ids, auth_event_ids,
                        state_map):
    conflicted_state = {}
    for key, event_ids in iteritems(conflicted_state_ids):
        events = [state_map[ev_id] for ev_id in event_ids if ev_id in state_map]
        if len(events) > 1:
            conflicted_state[key] = events
        elif len(events) == 1:
            unconflicted_state_ids[key] = events[0].event_id

    auth_events = {
        key: state_map[ev_id]
        for key, ev_id in iteritems(auth_event_ids)
        if ev_id in state_map
    }

    try:
        resolved_state = _resolve_state_events(
            conflicted_state, auth_events
        )
    except Exception:
        logger.exception("Failed to resolve state")
        raise

    new_state = unconflicted_state_ids
    for key, event in iteritems(resolved_state):
        new_state[key] = event.event_id

    return new_state
Example #10
 def get_data(self, parsed_args):
     self.log.debug('run(%s)', parsed_args)
     neutron_client = self.get_client()
     neutron_client.format = parsed_args.request_format
     _extra_values = neutronV20.parse_args_to_dict(self.values_specs)
     neutronV20._merge_args(self, parsed_args, _extra_values,
                            self.values_specs)
     body = self.args2body(parsed_args)
     if self.resource in body:
         body[self.resource].update(_extra_values)
     else:
         body[self.resource] = _extra_values
     obj_updator = getattr(neutron_client,
                           "update_%s" % self.resource)
     tenant_id = get_tenant_id(parsed_args.tenant_id,
                               neutron_client)
     data = obj_updator(tenant_id, body)
     if self.resource in data:
         for k, v in six.iteritems(data[self.resource]):
             if isinstance(v, list):
                 value = ""
                 for _item in v:
                     if value:
                         value += "\n"
                     if isinstance(_item, dict):
                         value += jsonutils.dumps(_item)
                     else:
                         value += str(_item)
                 data[self.resource][k] = value
             elif v is None:
                 data[self.resource][k] = ''
         return zip(*sorted(six.iteritems(data[self.resource])))
     else:
         return None
Example #11
 def get_objects(self):
     # type: () -> Iterator[Tuple[unicode, unicode, unicode, unicode, unicode, int]]
     for modname, info in iteritems(self.data['modules']):
         yield (modname, modname, 'module', info[0], 'module-' + modname, 0)
     for refname, (docname, type) in iteritems(self.data['objects']):
         if type != 'module':  # modules are already handled
             yield (refname, refname, type, docname, refname, 1)
Example #12
    def allowed_permissions(self, package):
        """
        Get all allowed permissions for all principals on a package

        Returns
        -------
        perms : dict
            Mapping of principal to tuple of permissions

        """
        all_perms = {}
        for user, perms in six.iteritems(self.user_permissions(package)):
            all_perms["user:" + user] = tuple(perms)
        # The obfuscated span here most plausibly granted default read
        # access, mirroring the default-write block below (reconstructed
        # assumption).
        for principal in groups_to_principals(self.default_read):
            if principal in all_perms:
                all_perms[principal] += ("read",)
            else:
                all_perms[principal] = ("read",)
        for principal in groups_to_principals(self.default_write):
            if principal in all_perms:
                all_perms[principal] += ("write",)
            else:
                all_perms[principal] = ("write",)
        return all_perms
Example #13
 def get_data(self, parsed_args):
     self.log.debug('get_data(%s)', parsed_args)
     neutron_client = self.get_client()
     neutron_client.format = parsed_args.request_format
     tenant_id = get_tenant_id(parsed_args.tenant_id,
                               neutron_client)
     params = {}
     obj_shower = getattr(neutron_client,
                          "show_%s" % self.resource)
     data = obj_shower(tenant_id, **params)
     if self.resource in data:
         for k, v in six.iteritems(data[self.resource]):
             if isinstance(v, list):
                 value = ""
                 for _item in v:
                     if value:
                         value += "\n"
                     if isinstance(_item, dict):
                         value += jsonutils.dumps(_item)
                     else:
                         value += str(_item)
                 data[self.resource][k] = value
             elif v is None:
                 data[self.resource][k] = ''
         return zip(*sorted(six.iteritems(data[self.resource])))
     else:
         return None
Example #14
def parse_future_info(future_info):
    new_info = {}

    for underlying_symbol, info in six.iteritems(future_info):
        try:
            underlying_symbol = underlying_symbol.upper()
        except AttributeError:
            raise RuntimeError(_("Invalid future info: underlying_symbol {} is illegal.".format(underlying_symbol)))

        for field, value in six.iteritems(info):
            if field in (
                "open_commission_ratio", "close_commission_ratio", "close_commission_today_ratio"
            ):
                new_info.setdefault(underlying_symbol, {})[field] = float(value)
            elif field == "commission_type":
                if isinstance(value, six.string_types) and value.upper() == "BY_MONEY":
                    new_info.setdefault(underlying_symbol, {})[field] = COMMISSION_TYPE.BY_MONEY
                elif isinstance(value, six.string_types) and value.upper() == "BY_VOLUME":
                    new_info.setdefault(underlying_symbol, {})[field] = COMMISSION_TYPE.BY_VOLUME
                elif isinstance(value, COMMISSION_TYPE):
                    new_info.setdefault(underlying_symbol, {})[field] = value
                else:
                    raise RuntimeError(_(
                        "Invalid future info: commission_type is supposed to be BY_MONEY or BY_VOLUME"
                    ))
            else:
                raise RuntimeError(_("Invalid future info: field {} is not valid".format(field)))
    return new_info
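A quick usage sketch (assumes the module-level COMMISSION_TYPE enum used above is in scope):

info = {
    "if": {
        "open_commission_ratio": "0.000023",
        "commission_type": "by_money",
    }
}
parsed = parse_future_info(info)
# parsed == {'IF': {'open_commission_ratio': 2.3e-05,
#                   'commission_type': COMMISSION_TYPE.BY_MONEY}}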
Example #15
    def __setstate__(self, state):
        """ Overwrite message state with given kwargs. """
        self._tokens = state['tokens']
        self.extra = {}
        self.expiry = state['expiry']
        self.priority = state['priority']

        if 'payload' in state:
            self._payload = state['payload']
            if hasattr(self._payload, "get") and self._payload.get("aps"):
                aps = self._payload["aps"]
                self.alert = aps.get("alert")
                self.badge = aps.get("badge")
                self.sound = aps.get("sound")
                self.content_available = aps.get("content-available")
                self.extra = dict([(k, v) for (k, v) in six.iteritems(self._payload) if k != 'aps'])
        else:
            self._payload = None
            for key, val in six.iteritems(state):
                if key in ('tokens', 'expiry', 'priority'):  # already set
                    pass
                elif key in ('alert', 'badge', 'sound', 'content_available'):
                    setattr(self, key, state[key])
                elif key == 'extra':
                    self.extra.update(state[key])
                else:
                    # legacy serialized object
                    self.extra[key] = val
Example #16
    def _write_f06_transient(self, header, page_stamp, page_num=1, f=None, is_mag_phase=False):
        assert f is not None
        words = ['                         C O M P L E X   F O R C E S   O F   M U L T I   P O I N T   C O N S T R A I N T\n',
                 '                                                          (REAL/IMAGINARY)\n',
                 ' \n',
                 '      POINT ID.   TYPE          T1             T2             T3             R1             R2             R3\n']
        msg = []
        for dt, translations in sorted(iteritems(self.translations)):
            header[1] = ' %s = %10.4E\n' % (self.data_code['name'], dt)
            msg += header + words
            for nodeID, translation in sorted(iteritems(translations)):
                rotation = self.rotations[dt][nodeID]
                grid_type = self.gridTypes[nodeID]

                (dx, dy, dz) = translation
                (rx, ry, rz) = rotation

                vals = [dx, dy, dz, rx, ry, rz]
                (vals2, is_all_zeros) = writeImagFloats13E(vals, is_mag_phase)
                #if not is_all_zeros:
                [v1r, v2r, v3r, v4r, v5r, v6r, v1i,
                    v2i, v3i, v4i, v5i, v6i] = vals2
                msg.append('0%13i %6s     %-13s  %-13s  %-13s  %-13s  %-13s  %s\n' % (nodeID, grid_type, v1r, v2r, v3r, v4r, v5r, v6r.rstrip()))
                msg.append(' %13i %6s     %-13s  %-13s  %-13s  %-13s  %-13s  %s\n' % (nodeID, grid_type, v1i, v2i, v3i, v4i, v5i, v6i.rstrip()))

            msg.append(page_stamp % page_num)
            f.write(''.join(msg))
            page_num += 1
        return page_num - 1
Example #17
def _write_f06_springs_transient(f, data, header, words, name,
                                 page_stamp, page_num=1):
    msg = []
    for dt, datai in sorted(iteritems(data)):
        header[1] = ' %s = %10.4E\n' % (name, dt)
        msg += header + words
        f.write(''.join(msg))

        eids = []
        stresses = []
        for eid, stress in sorted(iteritems(datai)):
            eids.append(eid)
            stresses.append(stress)
            if len(stresses) == 4:
                stresses, is_all_zeros = writeFloats13E(stresses)
                f.write('    %10i  %13s    %10i  %13s    %10i  %13s    %10i  %13s\n' % (
                    eids[0], stresses[0],
                    eids[1], stresses[1],
                    eids[2], stresses[2],
                    eids[3], stresses[3]))
                eids = []
                stresses = []

        if stresses:
            line = '    '
            stresses, is_all_zeros = writeFloats13E(stresses)
            for eid, stress in zip(eids, stresses):
                line += '%10i  %13s    ' % (eid, stress)
            f.write(line.rstrip() + '\n')

            msg.append(page_stamp % page_num)
            f.write(''.join(msg))
            msg = ['']
        page_num += 1

    return page_num - 1
Example #18
def _xform_availability_zone(az, include_extra):
    result = []
    zone_info = {}
    if hasattr(az, "zoneState"):
        zone_info["zone_status"] = "available" if az.zoneState["available"] else "not available"
    if hasattr(az, "zoneName"):
        zone_info["zone_name"] = az.zoneName

    if not include_extra:
        result.append(zone_info)
        return result

    if hasattr(az, "hosts") and az.hosts:
        for host, services in six.iteritems(az.hosts):
            host_info = copy.deepcopy(zone_info)
            host_info["host_name"] = host

            for svc, state in six.iteritems(services):
                info = copy.deepcopy(host_info)
                info["service_name"] = svc
                info["service_status"] = "%s %s %s" % (
                    "enabled" if state["active"] else "disabled",
                    ":-)" if state["available"] else "XXX",
                    state["updated_at"],
                )
                result.append(info)
    else:
        zone_info["host_name"] = ""
        zone_info["service_name"] = ""
        zone_info["service_status"] = ""
        result.append(zone_info)
    return result
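A minimal sketch of driving this transform with a stand-in availability-zone object (SimpleNamespace here is an assumption; real callers pass a nova AZ resource):

from types import SimpleNamespace

az = SimpleNamespace(
    zoneName="nova",
    zoneState={"available": True},
    hosts={"host1": {"nova-compute": {
        "active": True, "available": True,
        "updated_at": "2023-01-01T00:00:00",
    }}},
)
for row in _xform_availability_zone(az, include_extra=True):
    print(row)
# {'zone_status': 'available', 'zone_name': 'nova', 'host_name': 'host1',
#  'service_name': 'nova-compute',
#  'service_status': 'enabled :-) 2023-01-01T00:00:00'}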
Example #19
    def _write_input(self, input_dir="."):
        """
        Write the packmol input file to the input directory.

        Args:
            input_dir (string): path to the input directory
        """
        with open(os.path.join(input_dir, self.input_file), 'wt', encoding="utf-8") as inp:
            for k, v in six.iteritems(self.control_params):
                inp.write('{} {}\n'.format(k, self._format_param_val(v)))
            # write the structures of the constituent molecules to file and set
            # the molecule id and the corresponding filename in the packmol
            # input file.
            for idx, mol in enumerate(self.mols):
                a = BabelMolAdaptor(mol)
                pm = pb.Molecule(a.openbabel_mol)
                filename = os.path.join(
                    input_dir, '{}.{}'.format(
                        idx, self.control_params["filetype"])).encode("ascii")
                pm.write(self.control_params["filetype"], filename=filename,
                         overwrite=True)
                inp.write("\n")
                inp.write(
                    "structure {}.{}\n".format(
                        os.path.join(input_dir, str(idx)),
                        self.control_params["filetype"]))
                for k, v in six.iteritems(self.param_list[idx]):
                    inp.write('  {} {}\n'.format(k, self._format_param_val(v)))
                inp.write('end structure\n')
Example #20
    def convex_cardinality_wrapper(*args, **kwargs):
        def dict_result(r):
            if isinstance(r, tuple):
                return dict(r[0])
            return dict(r)

        # Initial run with default weights
        full_result = f(*args, **kwargs)
        result = dict_result(full_result)

        def update_weight(value):
            return 1/(epsilon + abs(value))

        # Iterate until the difference from one iteration to
        # the next is less than epsilon.
        while True:
            weights = {identifier: update_weight(value)
                       for identifier, value in iteritems(result)}
            kwargs['weights'] = weights

            last_result = result
            full_result = f(*args, **kwargs)
            result = dict_result(full_result)

            delta = math.sqrt(sum(pow(value - last_result[identifier], 2)
                                  for identifier, value in iteritems(result)))
            if delta < epsilon:
                break

        if isinstance(full_result, tuple):
            return (iteritems(result),) + full_result[1:]
        return iteritems(result)
Example #21
 def _blobnames_from_toplevelname(self, toplevelname, ilk=None):
     """Yield all blobnames in the currently selected catalogs
     with the given toplevelname.
     
     If "ilk" is given then only symbols of that ilk will be considered.
     """
     # toplevelname_index: {lang -> ilk -> toplevelname -> res_id -> blobnames}
     if self.lang in self.catalogs_zone.toplevelname_index:
         for i, potential_bfrft \
             in six.iteritems(self.catalogs_zone.toplevelname_index[self.lang]):
             if ilk is not None and i != ilk:
                 continue
             if toplevelname not in potential_bfrft:
                 continue
             potential_bfr = potential_bfrft[toplevelname]
             if self.selection_res_id_set is None:
                 for blobnames in six.itervalues(potential_bfr):
                     for blobname in blobnames:
                         yield blobname
             else:
                 for res_id, blobnames in six.iteritems(potential_bfr):
                     if res_id not in self.selection_res_id_set:
                         continue
                     for blobname in blobnames:
                         yield blobname
Example #22
    def load_shabp_results(self, shabp_filename, dirname):
        Cpd, deltad = self.model.read_shabp_out(shabp_filename)

        cases = self.result_cases
        icase = len(cases)
        mach_results = []
        form = self.form
        form.append(('Results', None, mach_results))
        #self.result_cases = {}
        mach_forms = {}
        for case_id, Cp in sorted(iteritems(Cpd)):
            Cp = Cpd[case_id]
            #delta = deltad[case_id]

            mach, alpha, beta = self.model.shabp_cases[case_id]
            #name = 'Mach=%g Alpha=%g' % (mach, alpha)
            name = 'Mach=%g Alpha=%g' % (mach, alpha)
            cases[(name, icase, 'Cp', 1, 'centroid', '%.3f', '')] = Cp
            cp_form = [
                ('Cp', icase, [])
            ]
            mach_forms.setdefault(mach, []).append(('Cp', None, cp_form))
            #self.result_cases[(name, 'delta', 1, 'centroid', '%.3f')] = delta

        for mach, mach_form in sorted(iteritems(mach_forms)):
            mach_results.append(mach_form)
        self._finish_results_io2(form, cases)
Example #23
    def setMetadata(self, folder, metadata):
        """
        Set metadata on a folder.  A rest exception is thrown in the cases
        where the metadata json object is badly formed, or if any of the
        metadata keys contains a period ('.').

        :param folder: The folder to set the metadata on.
        :type folder: dict
        :param metadata: A dictionary containing key-value pairs to add to
                     the folder's meta field
        :type metadata: dict
        :returns: the folder document
        """
        if 'meta' not in folder:
            folder['meta'] = {}

        # Add new metadata to existing metadata
        folder['meta'].update(six.iteritems(metadata))

        # Remove metadata fields that were set to null (use items in py3)
        folder['meta'] = {k: v
                          for k, v in six.iteritems(folder['meta'])
                          if v is not None}

        folder['updated'] = datetime.datetime.utcnow()

        # Validate and save the item
        return self.save(folder)
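The merge-then-prune idiom above can be seen in isolation (a standalone sketch, not the Girder API):

import six

meta = {'colour': 'red', 'size': 10}
updates = {'size': None, 'owner': 'alice'}

meta.update(six.iteritems(updates))  # add or overwrite keys
meta = {k: v for k, v in six.iteritems(meta)
        if v is not None}            # drop keys explicitly set to null
# meta == {'colour': 'red', 'owner': 'alice'}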
Example #24
def encode_task_value(value, query=False):
    if value is None:
        value = ''
    elif isinstance(value, datetime.datetime):
        if not value.tzinfo:
            #  Dates not having timezone information should be
            #  assumed to be in local time
            value = value.replace(tzinfo=dateutil.tz.tzlocal())
        #  All times should be converted to UTC before serializing
        value = value.astimezone(pytz.utc).strftime(DATE_FORMAT)
    elif isinstance(value, datetime.date):
        value = value.strftime(DATE_FORMAT)
    elif isinstance(value, six.string_types):
        if query:
            # In some contexts, parentheses are interpreted for use in
            # logical expressions.  They must *sometimes* be escaped.
            for left, right in six.iteritems(logical_replacements):
                value = value.replace(left, right)
        else:
            for unsafe, safe in six.iteritems(
                encode_replacements_experimental
            ):
                value = value.replace(unsafe, safe)
    else:
        value = str(value)
    return value
Example #25
    def learn(self, my_dict, aplot=None):
        if my_dict is None:
            logging.critical("Cannot learn function with empty dict")
            return lambda _: 1, 0
        d_dict = dict()
        samples, thresholds = [], []
        for k, v in six.iteritems(my_dict):
            for o in (_ for _ in v if _):
                dnearest = np.array(np.load("{}.npz".format(o))['X']).reshape(
                    -1, 1)
                var = np.var(dnearest)
                if var == 0:
                    continue
                med = np.median(dnearest)
                mean, _, _, h = mean_confidence_interval(dnearest)
                samples.append(dnearest.shape[0])
                d_dict.setdefault(o.split('/')[0], dict()).setdefault(k, [med, h])

                # for the threshold, fit a gaussian (unused for AP)
                thresholds.append(_gaussian_fit(dnearest))
        if len(d_dict) < 1:
            logging.critical("dictionary is empty")
            return lambda _: 1, 0
        for k, v in six.iteritems(d_dict):  # there is only one
            xdata = np.array(sorted(v))
            ydata = np.array([np.mean(v[x][0]) for x in xdata])
            yerr = np.array([np.mean(v[x][1]) for x in xdata])

        # Take only significant values, higher than 0
        mask = ydata > 0
        xdata = xdata[mask]
        if xdata.shape[0] < 2:
            logging.critical("Too few points to learn function")
            # no correction can be applied
            return lambda _: 1, 0

        ydata = ydata[mask]
        ydata = ydata[0] / ydata  # normalise
        yerr = yerr[mask]

        order = min(self.order, xdata.shape[0] - 1)
        warnings.filterwarnings("ignore")
        with warnings.catch_warnings():
            warnings.filterwarnings('error')
            try:
                poly = np.poly1d(np.polyfit(
                    xdata, ydata, order, w=1. / (yerr + 1e-15)))
            except np.RankWarning:
                logging.critical(
                    "Cannot fit polynomial with degree %d, npoints %d",
                    order, xdata.shape[0])
                return lambda _: 1, 0

        if aplot is None:
            aplot = getattr(self, 'aplot', None)
        if aplot is not None:
            plot_learning_function(xdata, ydata, yerr, order, aplot, poly)

        # poly = partial(model, res.x)
        # filter() is lazy on Python 3, so materialize before indexing
        positive = list(filter(
            lambda x: x > 0,
            np.array(thresholds)[np.array(samples).argsort()[::-1]]))
        return poly, 1 - (positive or [0])[0]
Example #26
    def _resolve_value(self, value):
        if isinstance(value, six.string_types):
            if value[0:2] == "<<":
                return value[1:]
            if value[0:1] == "<" and ">" in value:
                service_name, _, attrs = value[1:].rpartition(">")
                service = self._resolve_service(service_name)
                attrs = [a for a in attrs.split('.') if a]
                return six.moves.reduce(getattr, attrs, service)

        elif isinstance(value, dict):
            if self.factory_key in value:
                factory, args = self._resolve_value(value[self.factory_key]), []
                if isinstance(factory, list):
                    factory, args = factory[0], factory[1:]
                if isinstance(factory, six.string_types):
                    factory = import_name(factory)
                kwargs = {}
                for (dict_key, dict_value) in six.iteritems(value):
                    if dict_key != self.factory_key:
                        kwargs[dict_key] = self._resolve_value(dict_value)
                try:
                    return factory(*args, **kwargs)
                except Exception as err:
                    raise_and_annotate(err, "While calling factory '%s'" % value[self.factory_key])
            return {dict_key: self._resolve_value(dict_value)
                    for (dict_key, dict_value) in six.iteritems(value)}

        elif isinstance(value, list):
            return [self._resolve_value(item) for item in value]

        return value
Example #27
    def get_group_tag_values_for_users(self, event_users, limit=100):
        start, end = self.get_time_range()
        filters = {
            'project_id': [eu.project_id for eu in event_users]
        }
        conditions = [
            ['tags[sentry:user]', 'IN', filter(None, [eu.tag_value for eu in event_users])]
        ]
        aggregations = [
            ['count()', '', 'times_seen'],
            ['min', SEEN_COLUMN, 'first_seen'],
            ['max', SEEN_COLUMN, 'last_seen'],
        ]

        result = snuba.query(start, end, ['issue', 'user_id'], conditions, filters,
                             aggregations, orderby='-last_seen', limit=limit,
                             referrer='tagstore.get_group_tag_values_for_users')

        values = []
        for issue, users in six.iteritems(result):
            for name, data in six.iteritems(users):
                values.append(
                    GroupTagValue(
                        group_id=issue,
                        key='sentry:user',
                        value=name,
                        **fix_tag_value_data(data)
                    )
                )
        return values
Example #28
    def __init__(self, query_dict=None, q=None, fq=None, fl=DEFAULT_FIELDS,
                 sort=None, start=0, rows=50, max_pages=1, token=None, **kwargs):
        """
        constructor
        :param query_dict: raw query that will be sent unmodified. raw takes
            precedence over individually defined query params
        :type query_dict: dict
        :param q: solr "q" param (query)
        :param fq: solr "fq" param (filter query)
        :param fl: solr "fl" param (field list)
        :param sort: solr "sort" param (sort)
        :param start: solr "start" param (start)
        :param rows: solr "rows" param (rows)
        :param max_pages: Maximum number of pages to return. This value may
            be modified after instantiation to increase the number of results
        :param token: optional API token to use for this searchquery
        :param kwargs: kwargs to add to `q` as "key:value"
        """
        self._articles = []
        self.response = None  # current SolrResponse object
        self.max_pages = max_pages
        self.__iter_counter = 0  # Counter for our custom iterator method
        if query_dict is not None:
            query_dict.setdefault('rows', 50)
            query_dict.setdefault('start', 0)
            self._query = query_dict
        else:
            if sort is not None:
                sort = sort if " " in sort else "{} desc".format(sort)
            _ = {
                "q": q or '',
                "fq": fq,
                "fl": fl,
                "sort": sort,
                "start": start,
                "rows": int(rows),
            }
            # Filter out None values
            self._query = dict(
                (k, v) for k, v in six.iteritems(_) if v is not None
            )

            # Include `id` as a field, always (could be None, string or list)
            self._query.setdefault("fl", ["id"])
            if isinstance(self._query["fl"], six.string_types):
                _ = map(str.strip, self._query["fl"].split(","))
                self._query["fl"] = ["id"] + list(_)
            else:
                self._query["fl"] = ["id"] + self._query["fl"]

            # Format and add kwarg (key, value) pairs to q
            if kwargs:
                _ = ['{}:"{}"'.format(k, v) for k, v in six.iteritems(kwargs)]
                self._query['q'] = '{} {}'.format(self._query['q'], ' '.join(_))

        assert self._query.get('rows') > 0, "rows must be greater than 0"
        assert self._query.get('q'), "q must not be empty"

        if token is not None:
            self.token = token
Example #29
    def outputconflicts(self, options):
        """saves the result of the conflict match"""
        print("%d/%d different strings have conflicts" % (len(self.conflictmap), len(self.textmap)))
        reducedmap = {}

        def str_len(x):
            return len(x)

        for source, translations in six.iteritems(self.conflictmap):
            words = source.split()
            words.sort(key=str_len)
            source = words[-1]
            reducedmap.setdefault(source, []).extend(translations)
        # reduce plurals
        plurals = {}
        for word in reducedmap:
            if word + "s" in reducedmap:
                plurals[word] = word + "s"
        for word, pluralword in six.iteritems(plurals):
            reducedmap[word].extend(reducedmap.pop(pluralword))
        for source, translations in six.iteritems(reducedmap):
            flatsource = self.flatten(source, "-")
            fulloutputpath = os.path.join(options.output, flatsource + os.extsep + "po")
            conflictfile = po.pofile()
            for target, unit, filename in translations:
                unit.othercomments.append("# (poconflicts) %s\n" % filename)
                conflictfile.units.append(unit)
            with open(fulloutputpath, "wb") as fh:
                conflictfile.serialize(fh)
Example #30
def invert_dict(d, many=False):
    """
        Returns the inversion of keys and values in dictionary d. The default case
        allows overriding of values pointed to by multiple keys.  Passing many=True
        will make each value a key for the list of values pointing to it.

        Examples:
        invert_dict({ 1 : 2, 3 : 4 })
        { 2 : 1, 4 : 3 }

        invert_dict({ 1 : 2, 2 : 2, 3 : 2 })
        { 2 : 3 }

        invert_dict({ 1 : 2, 2 : 2, 3 : 2 }, many=True)
        { 2 : [ 1, 2, 3 ] }
    """
    if not many:
        return { v : k for k, v in six.iteritems(d) }

    output = collections.defaultdict(list)

    for k, v in six.iteritems(d):
        output[v].append(k)

    return dict(output)
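The docstring's examples, runnable as-is (assuming the six and collections imports used above):

print(invert_dict({1: 2, 3: 4}))
# {2: 1, 4: 3}
print(invert_dict({1: 2, 2: 2, 3: 2}, many=True))
# {2: [1, 2, 3]}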
Example #31
    def update_request_log_settings_with_http_info(self, **kwargs):
        """
        Enables or disables API request logging for troubleshooting.
        Enables or disables API request logging for troubleshooting.  When enabled (`apiRequestLogging` is set to true), REST API requests and responses for the user are added to a log. A log can have up to 50 requests/responses and the current number of log entries can be determined by getting the settings. Logging is automatically disabled when the log limit of 50 is reached.  You can call [ML:GetRequestLog] or [ML:GetRequestLogs] to download the log files (individually or as a zip file). Call [ML:DeleteRequestLogs] to clear the log by deleting current entries.  Private information, such as passwords and integrator key information, which is normally located in the call header is omitted from the request/response log.  ###### Note: API request logging only captures requests from the authenticated user. Any call that does not authenticate the user and resolve a userId isn't logged. Meaning that login_information, NewAccounts, or other distributor-credential calls are not logged. 
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.update_request_log_settings_with_http_info(callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param DiagnosticsSettingsInformation diagnostics_settings_information:
        :return: DiagnosticsSettingsInformation
                 If the method is called asynchronously,
                 returns the request thread.
        """

        all_params = ['diagnostics_settings_information']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError("Got an unexpected keyword argument '%s'"
                                " to method update_request_log_settings" % key)
            params[key] = val
        del params['kwargs']

        collection_formats = {}

        resource_path = '/v2/diagnostics/settings'.replace('{format}', 'json')
        path_params = {}

        query_params = {}

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'diagnostics_settings_information' in params:
            body_params = params['diagnostics_settings_information']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])

        # Authentication setting
        auth_settings = []

        return self.api_client.call_api(
            resource_path,
            'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='DiagnosticsSettingsInformation',
            auth_settings=auth_settings,
            callback=params.get('callback'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
Example #32
    def get_reminder(self, alert_token, **kwargs):
        # type: (str, **Any) -> Union[ApiResponse, GetReminderResponse, Error]
        """
        This API is invoked by the skill to get a single reminder. 

        :param alert_token: (required) 
        :type alert_token: str
        :param full_response: Boolean value to check if response should contain headers and status code information.
            This value had to be passed through keyword arguments, by default the parameter value is set to False. 
        :type full_response: boolean
        :rtype: Union[ApiResponse, GetReminderResponse, Error]
        """
        operation_name = "get_reminder"
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'alert_token' is set
        if ('alert_token' not in params) or (params['alert_token'] is None):
            raise ValueError(
                "Missing the required parameter `alert_token` when calling `" + operation_name + "`")

        resource_path = '/v1/alerts/reminders/{alertToken}'
        resource_path = resource_path.replace('{format}', 'json')

        path_params = {}  # type: Dict
        if 'alert_token' in params:
            path_params['alertToken'] = params['alert_token']

        query_params = []  # type: List

        header_params = []  # type: List

        body_params = None
        header_params.append(('Content-type', 'application/json'))

        # Response Type
        full_response = False
        if 'full_response' in params:
            full_response = params['full_response']

        # Authentication setting
        authorization_value = "Bearer " + self._authorization_value
        header_params.append(("Authorization", authorization_value))

        error_definitions = []  # type: List
        error_definitions.append(ServiceClientResponse(response_type="ask_sdk_model.services.reminder_management.get_reminder_response.GetReminderResponse", status_code=200, message="Success"))
        error_definitions.append(ServiceClientResponse(response_type="ask_sdk_model.services.reminder_management.error.Error", status_code=401, message="UserAuthenticationException. Request is not authorized/authenticated e.g. If customer does not have permission to create a reminder."))
        error_definitions.append(ServiceClientResponse(response_type="ask_sdk_model.services.reminder_management.error.Error", status_code=429, message="RateExceededException e.g. When the skill is throttled for exceeding the max rate"))
        error_definitions.append(ServiceClientResponse(response_type="ask_sdk_model.services.reminder_management.error.Error", status_code=500, message="Internal Server Error"))

        api_response = self.invoke(
            method="GET",
            endpoint=self._api_endpoint,
            path=resource_path,
            path_params=path_params,
            query_params=query_params,
            header_params=header_params,
            body=body_params,
            response_definitions=error_definitions,
            response_type="ask_sdk_model.services.reminder_management.get_reminder_response.GetReminderResponse")

        if full_response:
            return api_response
        return api_response.body
Example #33
def draw_boxes_and_labels(
        boxes,
        classes,
        scores,
        category_index,
        instance_masks=None,
        keypoints=None,
        max_boxes_to_draw=20,
        min_score_thresh=.5,
        agnostic_mode=False):
    """Returns boxes coordinates, class names and colors

    Args:
      boxes: a numpy array of shape [N, 4]
      classes: a numpy array of shape [N]
      scores: a numpy array of shape [N] or None.  If scores=None, then
        this function assumes that the boxes to be plotted are groundtruth
        boxes and plot all boxes as black with no classes or scores.
      category_index: a dict containing category dictionaries (each holding
        category index `id` and category name `name`) keyed by category indices.
      instance_masks: a numpy array of shape [N, image_height, image_width], can
        be None
      keypoints: a numpy array of shape [N, num_keypoints, 2], can
        be None
      max_boxes_to_draw: maximum number of boxes to visualize.  If None, draw
        all boxes.
      min_score_thresh: minimum score threshold for a box to be visualized
      agnostic_mode: boolean (default: False) controlling whether to evaluate in
        class-agnostic mode or not.  This mode will display scores but ignore
        classes.
    """
    # Create a display string (and color) for every box location, group any boxes
    # that correspond to the same location.
    box_to_display_str_map = collections.defaultdict(list)
    box_to_color_map = collections.defaultdict(str)
    box_to_instance_masks_map = {}
    box_to_keypoints_map = collections.defaultdict(list)
    if not max_boxes_to_draw:
        max_boxes_to_draw = boxes.shape[0]
    for i in range(min(max_boxes_to_draw, boxes.shape[0])):
        if scores is None or scores[i] > min_score_thresh:
            box = tuple(boxes[i].tolist())
            if instance_masks is not None:
                box_to_instance_masks_map[box] = instance_masks[i]
            if keypoints is not None:
                box_to_keypoints_map[box].extend(keypoints[i])
            if scores is None:
                box_to_color_map[box] = 'black'
            else:
                if not agnostic_mode:
                    if classes[i] in category_index.keys():
                        class_name = category_index[classes[i]]['name']
                    else:
                        class_name = 'N/A'
                    display_str = '{}: {}%'.format(
                        class_name,
                        int(100 * scores[i]))
                else:
                    display_str = 'score: {}%'.format(int(100 * scores[i]))
                box_to_display_str_map[box].append(display_str)
                if agnostic_mode:
                    box_to_color_map[box] = 'DarkOrange'
                else:
                    box_to_color_map[box] = standard_colors()[
                        classes[i] % len(standard_colors())]

    # Store all the coordinates of the boxes, class names and colors
    color_rgb = color_name_to_rgb()
    rect_points = []
    class_names = []
    class_colors = []
    for box, color in six.iteritems(box_to_color_map):
        ymin, xmin, ymax, xmax = box
        rect_points.append(dict(ymin=ymin, xmin=xmin, ymax=ymax, xmax=xmax))
        class_names.append(box_to_display_str_map[box])
        class_colors.append(color_rgb[color.lower()])
    return rect_points, class_names, class_colors
Example #34
    def get_with_http_info(self, **kwargs):  # noqa: E501
        """Get the EULA  # noqa: E501

          # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True
        >>> thread = api.get_with_http_info(async=True)
        >>> result = thread.get()

        :param async bool
        :return: Eula
                 If the method is called asynchronously,
                 returns the request thread.
        """

        all_params = []  # noqa: E501
        all_params.append('async')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get" % key
                )
            params[key] = val
        del params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = []  # noqa: E501

        return self.api_client.call_api(
            '/api/v10/eula', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='Eula',  # noqa: E501
            auth_settings=auth_settings,
            async=params.get('async'),
            model_package="cloudera.director.v10.models",
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
Example #35
    def RunCommand(self,
                   command_name,
                   args=None,
                   headers=None,
                   debug=0,
                   return_stdout=False,
                   return_stderr=False,
                   return_log_handler=False,
                   cwd=None):
        """Method for calling gslib.command_runner.CommandRunner.

    Passes parallel_operations=False for all tests, optionally saving/returning
    stdout output. We run all tests multi-threaded, to exercise those more
    complicated code paths.
    TODO: Change to run with parallel_operations=True for all tests. At
    present when you do this it causes many test failures.

    Args:
      command_name: The name of the command being run.
      args: Command-line args (arg0 = actual arg, not command name ala bash).
      headers: Dictionary containing optional HTTP headers to pass to boto.
      debug: Debug level to pass in to boto connection (range 0..3).
      return_stdout: If True, will save and return stdout produced by command.
      return_stderr: If True, will save and return stderr produced by command.
      return_log_handler: If True, will return a MockLoggingHandler instance
           that was attached to the command's logger while running.
      cwd: The working directory that should be switched to before running the
           command. The working directory will be reset back to its original
           value after running the command. If not specified, the working
           directory is left unchanged.

    Returns:
      One or a tuple of requested return values, depending on whether
      return_stdout, return_stderr, and/or return_log_handler were specified.
      Return Types:
        stdout - binary
        stderr - str (binary in Py2, text in Py3)
        log_handler - MockLoggingHandler
    """
        args = args or []

        command_line = six.ensure_text(' '.join([command_name] + args))
        if self.is_debugging:
            print_to_fd('\nRunCommand of {}\n'.format(command_line),
                        file=self.stderr_save)

        # Save and truncate stdout and stderr for the lifetime of RunCommand. This
        # way, we can return just the stdout and stderr that was output during the
        # RunNamedCommand call below.
        sys.stdout.seek(0)
        sys.stderr.seek(0)
        stdout = sys.stdout.read()
        stderr = sys.stderr.read()
        if stdout:
            self.accumulated_stdout.append(stdout)
        if stderr:
            self.accumulated_stderr.append(stderr)
        sys.stdout.seek(0)
        sys.stderr.seek(0)
        sys.stdout.truncate()
        sys.stderr.truncate()

        mock_log_handler = MockLoggingHandler()
        logging.getLogger(command_name).addHandler(mock_log_handler)
        if debug:
            logging.getLogger(command_name).setLevel(logging.DEBUG)

        try:
            with WorkingDirectory(cwd):
                self.command_runner.RunNamedCommand(command_name,
                                                    args=args,
                                                    headers=headers,
                                                    debug=debug,
                                                    parallel_operations=False,
                                                    do_shutdown=False)
        finally:
            sys.stdout.seek(0)
            sys.stderr.seek(0)
            if six.PY2:
                stdout = sys.stdout.read()
                stderr = sys.stderr.read()
            else:
                try:
                    stdout = sys.stdout.read()
                    stderr = sys.stderr.read()
                except UnicodeDecodeError:
                    sys.stdout.seek(0)
                    sys.stderr.seek(0)
                    stdout = sys.stdout.buffer.read().decode(UTF8)
                    stderr = sys.stderr.buffer.read().decode(UTF8)
            logging.getLogger(command_name).removeHandler(mock_log_handler)
            mock_log_handler.close()

            log_output = '\n'.join(
                '%s:\n  ' % level + '\n  '.join(records)
                for level, records in six.iteritems(mock_log_handler.messages)
                if records)

            _id = six.ensure_text(self.id())
            if self.is_debugging and log_output:
                print_to_fd('==== logging RunCommand {} {} ====\n'.format(
                    _id, command_line),
                            file=self.stderr_save)
                print_to_fd(log_output, file=self.stderr_save)
                print_to_fd('\n==== end logging ====\n', file=self.stderr_save)
            if self.is_debugging and stdout:
                print_to_fd('==== stdout RunCommand {} {} ====\n'.format(
                    _id, command_line),
                            file=self.stderr_save)
                print_to_fd(stdout, file=self.stderr_save)
                print_to_fd('==== end stdout ====\n', file=self.stderr_save)
            if self.is_debugging and stderr:
                print_to_fd('==== stderr RunCommand {} {} ====\n'.format(
                    _id, command_line),
                            file=self.stderr_save)
                print_to_fd(stderr, file=self.stderr_save)
                print_to_fd('==== end stderr ====\n', file=self.stderr_save)

            # Reset stdout and stderr files, so that we won't print them out again
            # in tearDown if debugging is enabled.
            sys.stdout.seek(0)
            sys.stderr.seek(0)
            sys.stdout.truncate()
            sys.stderr.truncate()

        to_return = []
        if return_stdout:
            to_return.append(stdout)
        if return_stderr:
            to_return.append(stderr)
        if return_log_handler:
            to_return.append(mock_log_handler)
        if len(to_return) == 1:
            return to_return[0]
        return tuple(to_return)
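The save/seek/truncate dance above is the heart of this helper: stdout and stderr have been swapped for seekable buffers, so output can be read back and cleared between commands. A minimal, self-contained sketch of the same pattern (using io.StringIO directly; an assumption, not gsutil's actual fixture):

import io
import sys


def capture_output(func):
    """Run func() with sys.stdout pointed at a seekable buffer, return what it
    printed, and leave the buffer empty for the next caller."""
    original = sys.stdout
    sys.stdout = io.StringIO()
    try:
        func()
        sys.stdout.seek(0)
        captured = sys.stdout.read()
        # Reset the buffer so later output is not mixed in with ours.
        sys.stdout.seek(0)
        sys.stdout.truncate()
    finally:
        sys.stdout = original
    return captured


print(capture_output(lambda: print('hello')))  # -> hello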
Example #36
0
    def collection_hits_get_collection_hit_with_http_info(
            self, data_view_name, collection_hit_id, **kwargs):  # noqa: E501
        """Requires OrbitAdmin: Returns details for a given collection hit.  # noqa: E501

        This endpoint is only available for users with the OrbitAdmin role  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.collection_hits_get_collection_hit_with_http_info(data_view_name, collection_hit_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str data_view_name: The name of the DataView to act on (required)
        :param int collection_hit_id: The id of the hit (required)
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(CollectionHitDetail, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        local_var_params = locals()

        all_params = ['data_view_name', 'collection_hit_id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method collection_hits_get_collection_hit" % key)
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'data_view_name' is set
        if ('data_view_name' not in local_var_params
                or local_var_params['data_view_name'] is None):
            raise ApiValueError(
                "Missing the required parameter `data_view_name` when calling `collection_hits_get_collection_hit`"
            )  # noqa: E501
        # verify the required parameter 'collection_hit_id' is set
        if ('collection_hit_id' not in local_var_params
                or local_var_params['collection_hit_id'] is None):
            raise ApiValueError(
                "Missing the required parameter `collection_hit_id` when calling `collection_hits_get_collection_hit`"
            )  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'data_view_name' in local_var_params:
            path_params['dataViewName'] = local_var_params[
                'data_view_name']  # noqa: E501
        if 'collection_hit_id' in local_var_params:
            path_params['collectionHitId'] = local_var_params[
                'collection_hit_id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'text/json', 'application/xml',
             'text/xml'])  # noqa: E501

        # Authentication setting
        auth_settings = ['faststats_auth']  # noqa: E501

        return self.api_client.call_api(
            '/{dataViewName}/CollectionHits/{collectionHitId}',
            'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='CollectionHitDetail',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get(
                '_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
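A hypothetical usage sketch (the api instance, DataView name, and hit id are illustrative values, not from the source):

# Synchronous call: returns (data, status_code, headers).
detail, status, headers = api.collection_hits_get_collection_hit_with_http_info(
    'MyDataView', 42)

# Asynchronous call: returns the request thread; .get() blocks for the result.
thread = api.collection_hits_get_collection_hit_with_http_info(
    'MyDataView', 42, async_req=True)
detail, status, headers = thread.get()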
Example #37
0
    def collection_hits_get_collection_hits_with_http_info(
            self, data_view_name, **kwargs):  # noqa: E501
        """Requires OrbitAdmin: Returns all the hit information for all collections.  # noqa: E501

        This endpoint is only available for users with the OrbitAdmin role  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.collection_hits_get_collection_hits_with_http_info(data_view_name, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str data_view_name: The name of the DataView to act on (required)
        :param str filter: Filter the list of items using a simple expression language.  The available fields are Username, Timestamp, UserAgentDetails
        :param str order_by: Order the items by a given field (in ascending order unless the field is preceded by a \"-\" character).  The available fields are Username, Timestamp, UserAgentDetails
        :param int offset: The number of items to skip in the (potentially filtered) result set before returning subsequent items.
        :param int count: The maximum number of items to show from the (potentially filtered) result set.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(PagedResultsCollectionHitSummary, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        local_var_params = locals()

        all_params = [
            'data_view_name', 'filter', 'order_by', 'offset', 'count'
        ]  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method collection_hits_get_collection_hits" % key)
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'data_view_name' is set
        if ('data_view_name' not in local_var_params
                or local_var_params['data_view_name'] is None):
            raise ApiValueError(
                "Missing the required parameter `data_view_name` when calling `collection_hits_get_collection_hits`"
            )  # noqa: E501

        if 'offset' in local_var_params and local_var_params[
                'offset'] < 0:  # noqa: E501
            raise ApiValueError(
                "Invalid value for parameter `offset` when calling `collection_hits_get_collection_hits`, must be a value greater than or equal to `0`"
            )  # noqa: E501
        if 'count' in local_var_params and local_var_params[
                'count'] < 0:  # noqa: E501
            raise ApiValueError(
                "Invalid value for parameter `count` when calling `collection_hits_get_collection_hits`, must be a value greater than or equal to `0`"
            )  # noqa: E501
        collection_formats = {}

        path_params = {}
        if 'data_view_name' in local_var_params:
            path_params['dataViewName'] = local_var_params[
                'data_view_name']  # noqa: E501

        query_params = []
        if 'filter' in local_var_params:
            query_params.append(
                ('filter', local_var_params['filter']))  # noqa: E501
        if 'order_by' in local_var_params:
            query_params.append(
                ('orderBy', local_var_params['order_by']))  # noqa: E501
        if 'offset' in local_var_params:
            query_params.append(
                ('offset', local_var_params['offset']))  # noqa: E501
        if 'count' in local_var_params:
            query_params.append(
                ('count', local_var_params['count']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'text/json', 'application/xml',
             'text/xml'])  # noqa: E501

        # Authentication setting
        auth_settings = ['faststats_auth']  # noqa: E501

        return self.api_client.call_api(
            '/{dataViewName}/CollectionHits',
            'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='PagedResultsCollectionHitSummary',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get(
                '_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
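The locals()/kwargs prologue is identical in every generated method in this file. Stripped to its essentials, the pattern looks like this (a sketch with made-up parameter names, not library code):

def example_endpoint(required_arg, **kwargs):
    local_var_params = locals()

    # Whitelist of every keyword argument the method accepts.
    all_params = ['required_arg', 'optional_arg', 'async_req']

    # Promote each kwarg to a top-level entry, rejecting unknown names early.
    for key, val in kwargs.items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'" % key)
        local_var_params[key] = val
    del local_var_params['kwargs']
    return local_var_params


print(example_endpoint('x', optional_arg=1))
# -> {'required_arg': 'x', 'optional_arg': 1}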
    def list_request_logs_with_http_info(self, **kwargs):
        """
        Gets the API request logging log files.
        Retrieves a list of log entries as a JSON or XML object or as a zip file containing the entries.  If the Accept header is set to application/zip, the response is a zip file containing individual text files, each representing an API request.  If the Accept header is set to `application/json` or `application/xml`, the response returns a list of log entries in either JSON or XML. An example JSON response body is shown below.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.list_request_logs_with_http_info(callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str encoding:
        :return: ApiRequestLogsResult
                 If the method is called asynchronously,
                 returns the request thread.
        """

        all_params = ['encoding']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError("Got an unexpected keyword argument '%s'"
                                " to method list_request_logs" % key)
            params[key] = val
        del params['kwargs']

        collection_formats = {}

        resource_path = '/v2/diagnostics/request_logs'.replace(
            '{format}', 'json')
        path_params = {}

        query_params = {}
        if 'encoding' in params:
            query_params['encoding'] = params['encoding']

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])

        # Authentication setting
        auth_settings = []

        return self.api_client.call_api(
            resource_path,
            'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='ApiRequestLogsResult',
            auth_settings=auth_settings,
            callback=params.get('callback'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
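This older generator style signals asynchrony with a callback rather than async_req. A hypothetical usage sketch (the api instance is assumed; the exact behavior of the returned thread depends on the client):

from pprint import pprint


def callback_function(response):
    # Invoked once the response arrives.
    pprint(response)


# Passing `callback` makes the call return the request thread immediately.
thread = api.list_request_logs_with_http_info(callback=callback_function)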
    def get_request_log_with_http_info(self, request_log_id, **kwargs):
        """
        Gets a request logging log file.
        Retrieves information for a single log entry.  **Request** The `requestLogId` property can be retrieved by getting the list of log entries. The Content-Transfer-Encoding header can be set to base64 to retrieve the API request/response as a base64 string. Otherwise the bytes of the request/response are returned.  **Response** If the Content-Transfer-Encoding header was set to base64, the log is returned as a base64 string.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.get_request_log_with_http_info(request_log_id, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str request_log_id: (required)
        :return: file
                 If the method is called asynchronously,
                 returns the request thread.
        """

        all_params = ['request_log_id']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError("Got an unexpected keyword argument '%s'"
                                " to method get_request_log" % key)
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'request_log_id' is set
        if ('request_log_id'
                not in params) or (params['request_log_id'] is None):
            raise ValueError(
                "Missing the required parameter `request_log_id` when calling `get_request_log`"
            )

        collection_formats = {}

        resource_path = '/v2/diagnostics/request_logs/{requestLogId}'.replace(
            '{format}', 'json')
        path_params = {}
        if 'request_log_id' in params:
            path_params['requestLogId'] = params['request_log_id']

        query_params = {}

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['text/plain'])

        # Authentication setting
        auth_settings = []

        return self.api_client.call_api(
            resource_path,
            'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='file',
            auth_settings=auth_settings,
            callback=params.get('callback'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
    def delete_request_logs_with_http_info(self, **kwargs):
        """
        Deletes the request log files.
        Deletes the request log files.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.delete_request_logs_with_http_info(callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """

        all_params = []
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError("Got an unexpected keyword argument '%s'"
                                " to method delete_request_logs" % key)
            params[key] = val
        del params['kwargs']

        collection_formats = {}

        resource_path = '/v2/diagnostics/request_logs'.replace(
            '{format}', 'json')
        path_params = {}

        query_params = {}

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])

        # Authentication setting
        auth_settings = []

        return self.api_client.call_api(
            resource_path,
            'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,
            auth_settings=auth_settings,
            callback=params.get('callback'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
Example #41
0
    def test_loader_given_multiple_columns(self):
        class Loader1DataSet1(DataSet):
            col1 = Column(float32)
            col2 = Column(float32)

        class Loader1DataSet2(DataSet):
            col1 = Column(float32)
            col2 = Column(float32)

        class Loader2DataSet(DataSet):
            col1 = Column(float32)
            col2 = Column(float32)

        constants1 = {
            Loader1DataSet1.col1: 1,
            Loader1DataSet1.col2: 2,
            Loader1DataSet2.col1: 3,
            Loader1DataSet2.col2: 4
        }
        loader1 = RecordingConstantLoader(constants=constants1,
                                          dates=self.dates,
                                          assets=self.assets)
        constants2 = {Loader2DataSet.col1: 5, Loader2DataSet.col2: 6}
        loader2 = RecordingConstantLoader(constants=constants2,
                                          dates=self.dates,
                                          assets=self.assets)

        engine = SimplePipelineEngine(
            lambda column: loader2
            if column.dataset == Loader2DataSet else loader1,
            self.dates,
            self.asset_finder,
        )

        pipe_col1 = RollingSumSum(inputs=[
            Loader1DataSet1.col1, Loader1DataSet2.col1, Loader2DataSet.col1
        ],
                                  window_length=2)

        pipe_col2 = RollingSumSum(inputs=[
            Loader1DataSet1.col2, Loader1DataSet2.col2, Loader2DataSet.col2
        ],
                                  window_length=3)

        pipe_col3 = RollingSumSum(inputs=[Loader2DataSet.col1],
                                  window_length=3)

        columns = OrderedDict([
            ('pipe_col1', pipe_col1),
            ('pipe_col2', pipe_col2),
            ('pipe_col3', pipe_col3),
        ])
        result = engine.run_pipeline(
            Pipeline(columns=columns),
            self.dates[2],  # index is >= the largest window length - 1
            self.dates[-1])
        min_window = min(pip_col.window_length
                         for pip_col in itervalues(columns))
        col_to_val = ChainMap(constants1, constants2)
        vals = {
            name: (sum(col_to_val[col]
                       for col in pipe_col.inputs) * pipe_col.window_length)
            for name, pipe_col in iteritems(columns)
        }

        index = MultiIndex.from_product([self.dates[2:], self.assets])
        expected = DataFrame(data={
            col:
            concatenate((full(
                (columns[col].window_length - min_window) * index.levshape[1],
                nan),
                         full((index.levshape[0] -
                               (columns[col].window_length - min_window)) *
                              index.levshape[1], val)))
            for col, val in iteritems(vals)
        },
                             index=index,
                             columns=columns)

        assert_frame_equal(result, expected)

        self.assertEqual(
            set(loader1.load_calls), {
                ColumnArgs.sorted_by_ds(Loader1DataSet1.col1,
                                        Loader1DataSet2.col1),
                ColumnArgs.sorted_by_ds(Loader1DataSet1.col2,
                                        Loader1DataSet2.col2)
            })
        self.assertEqual(set(loader2.load_calls), {
            ColumnArgs.sorted_by_ds(Loader2DataSet.col1, Loader2DataSet.col2)
        })
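The expected values in this test reduce to simple arithmetic: RollingSumSum over constant inputs yields (sum of the input constants) * window_length. A quick check for pipe_col1, using the constants defined above:

# pipe_col1 sums Loader1DataSet1.col1 (=1), Loader1DataSet2.col1 (=3) and
# Loader2DataSet.col1 (=5), rolled over a window of length 2.
print((1 + 3 + 5) * 2)  # -> 18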
    def get_service_with_http_info(self, **kwargs):
        """
        Retrieves the available REST API versions.
        Retrieves the available REST API versions.  DocuSign Production system: https://www.docusign.net/restapi/service_information DocuSign Demo system: https://demo.docusign.net/restapi/service_information  You do not need an integrator key to view the REST API versions and resources.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.get_service_with_http_info(callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :return: ServiceInformation
                 If the method is called asynchronously,
                 returns the request thread.
        """

        all_params = []
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError("Got an unexpected keyword argument '%s'"
                                " to method get_service" % key)
            params[key] = val
        del params['kwargs']

        collection_formats = {}

        resource_path = '/service_information'.replace('{format}', 'json')
        path_params = {}

        query_params = {}

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])

        # Authentication setting
        auth_settings = []

        return self.api_client.call_api(
            resource_path,
            'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='ServiceInformation',
            auth_settings=auth_settings,
            callback=params.get('callback'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
Example #43
0
def get_final_text(pred_text, orig_text, do_lower_case):
    """Project the tokenized prediction back to the original text."""

    # When we created the data, we kept track of the alignment between original
    # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
    # now `orig_text` contains the span of our original text corresponding to the
    # span that we predicted.
    #
    # However, `orig_text` may contain extra characters that we don't want in
    # our prediction.
    #
    # For example, let's say:
    #   pred_text = steve smith
    #   orig_text = Steve Smith's
    #
    # We don't want to return `orig_text` because it contains the extra "'s".
    #
    # We don't want to return `pred_text` because it's already been normalized
    # (the SQuAD eval script also does punctuation stripping/lower casing but
    # our tokenizer does additional normalization like stripping accent
    # characters).
    #
    # What we really want to return is "Steve Smith".
    #
    # Therefore, we have to apply a semi-complicated alignment heuristic between
    # `pred_text` and `orig_text` to get a character-to-character alignment. This
    # can fail in certain cases in which case we just return `orig_text`.

    def _strip_spaces(text):
        ns_chars = []
        ns_to_s_map = collections.OrderedDict()
        for (i, c) in enumerate(text):
            if c == " ":
                continue
            ns_to_s_map[len(ns_chars)] = i
            ns_chars.append(c)
        ns_text = "".join(ns_chars)
        return (ns_text, ns_to_s_map)

    # We first tokenize `orig_text`, strip whitespace from the result
    # and `pred_text`, and check if they are the same length. If they are
    # NOT the same length, the heuristic has failed. If they are the same
    # length, we assume the characters are one-to-one aligned.
    tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case)

    tok_text = " ".join(tokenizer.tokenize(orig_text))

    start_position = tok_text.find(pred_text)
    if start_position == -1:
        if FLAGS.verbose_logging:
            tf.logging.info("Unable to find text: '%s' in '%s'" %
                            (pred_text, orig_text))
        return orig_text
    end_position = start_position + len(pred_text) - 1

    (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
    (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)

    if len(orig_ns_text) != len(tok_ns_text):
        if FLAGS.verbose_logging:
            tf.logging.info(
                "Length not equal after stripping spaces: '%s' vs '%s'",
                orig_ns_text, tok_ns_text)
        return orig_text

    # We then project the characters in `pred_text` back to `orig_text` using
    # the character-to-character alignment.
    tok_s_to_ns_map = {}
    for (i, tok_index) in six.iteritems(tok_ns_to_s_map):
        tok_s_to_ns_map[tok_index] = i

    orig_start_position = None
    if start_position in tok_s_to_ns_map:
        ns_start_position = tok_s_to_ns_map[start_position]
        if ns_start_position in orig_ns_to_s_map:
            orig_start_position = orig_ns_to_s_map[ns_start_position]

    if orig_start_position is None:
        if FLAGS.verbose_logging:
            tf.logging.info("Couldn't map start position")
        return orig_text

    orig_end_position = None
    if end_position in tok_s_to_ns_map:
        ns_end_position = tok_s_to_ns_map[end_position]
        if ns_end_position in orig_ns_to_s_map:
            orig_end_position = orig_ns_to_s_map[ns_end_position]

    if orig_end_position is None:
        if FLAGS.verbose_logging:
            tf.logging.info("Couldn't map end position")
        return orig_text

    output_text = orig_text[orig_start_position:(orig_end_position + 1)]
    return output_text
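For the worked example in the comments above, the heuristic recovers the original casing and drops the trailing "'s" (this call assumes the surrounding file's tokenization module and FLAGS are importable):

print(get_final_text("steve smith", "Steve Smith's", do_lower_case=True))
# -> Steve Smith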
def get_matching_descriptions_data(company, transaction):
    if not transaction.description:
        return []

    bank_transactions = frappe.db.sql("""
		SELECT
			bt.name, bt.description, bt.date, btp.payment_document, btp.payment_entry
		FROM
			`tabBank Transaction` as bt
		LEFT JOIN
			`tabBank Transaction Payments` as btp
		ON
			bt.name = btp.parent
		WHERE
			bt.allocated_amount > 0
		AND
			bt.docstatus = 1
		""",
                                      as_dict=True)

    selection = []
    for bank_transaction in bank_transactions:
        if bank_transaction.description:
            seq = difflib.SequenceMatcher(lambda x: x == " ",
                                          transaction.description,
                                          bank_transaction.description)

            if seq.ratio() > 0.6:
                bank_transaction["ratio"] = seq.ratio()
                selection.append(bank_transaction)

    document_types = set([x["payment_document"] for x in selection])

    links = {}
    for document_type in document_types:
        links[document_type] = [
            x["payment_entry"] for x in selection
            if x["payment_document"] == document_type
        ]

    data = []
    company_currency = get_company_currency(company)
    for key, value in iteritems(links):
        if key == "Payment Entry":
            data.extend(
                frappe.get_all("Payment Entry",
                               filters=[["name", "in", value]],
                               fields=[
                                   "'Payment Entry' as doctype",
                                   "posting_date", "party", "reference_no",
                                   "reference_date", "paid_amount",
                                   "paid_to_account_currency as currency"
                               ]))
        if key == "Journal Entry":
            journal_entries = frappe.get_all(
                "Journal Entry",
                filters=[["name", "in", value]],
                fields=[
                    "name", "'Journal Entry' as doctype", "posting_date",
                    "paid_to_recd_from as party", "cheque_no as reference_no",
                    "cheque_date as reference_date",
                    "total_credit as paid_amount"
                ])
            for journal_entry in journal_entries:
                journal_entry_accounts = frappe.get_all(
                    "Journal Entry Account",
                    filters={
                        "parenttype": journal_entry["doctype"],
                        "parent": journal_entry["name"]
                    },
                    fields=["account_currency"])
                journal_entry["currency"] = journal_entry_accounts[0][
                    "account_currency"] if journal_entry_accounts else company_currency
            data.extend(journal_entries)
        if key == "Sales Invoice":
            data.extend(
                frappe.get_all("Sales Invoice",
                               filters=[["name", "in", value]],
                               fields=[
                                   "'Sales Invoice' as doctype",
                                   "posting_date", "customer_name as party",
                                   "paid_amount", "currency"
                               ]))
        if key == "Purchase Invoice":
            data.extend(
                frappe.get_all("Purchase Invoice",
                               filters=[["name", "in", value]],
                               fields=[
                                   "'Purchase Invoice' as doctype",
                                   "posting_date", "supplier_name as party",
                                   "paid_amount", "currency"
                               ]))
        if key == "Expense Claim":
            expense_claims = frappe.get_all(
                "Expense Claim",
                filters=[["name", "in", value]],
                fields=[
                    "'Expense Claim' as doctype", "posting_date",
                    "employee_name as party",
                    "total_amount_reimbursed as paid_amount"
                ])
            data.extend([
                dict(x, **{"currency": company_currency})
                for x in expense_claims
            ])

    return data
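The 0.6 cutoff above is difflib's similarity ratio; a self-contained sketch with hypothetical bank descriptions shows how it behaves:

import difflib

# The first argument is the "junk" predicate: spaces are ignored when matching.
seq = difflib.SequenceMatcher(lambda x: x == " ",
                              "AMAZON PAYMENTS INV-0042",
                              "AMAZON PAYMENT INV-0042")
print(seq.ratio() > 0.6)  # -> True; the ratio here is close to 1.0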
Example #45
0
def write_arguments_to_file(args, filename):
    with open(filename, 'w') as f:
        for key, value in iteritems(vars(args)):
            f.write('%s: %s\n' % (key, str(value)))
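A minimal usage sketch (the argparse setup is illustrative, not from the source):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--learning_rate', type=float, default=0.001)
parser.add_argument('--epochs', type=int, default=10)
args = parser.parse_args([])

write_arguments_to_file(args, 'arguments.txt')
# arguments.txt now contains lines like:
#   learning_rate: 0.001
#   epochs: 10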
def get_items_for_material_requests(doc, ignore_existing_ordered_qty=None):
	if isinstance(doc, string_types):
		doc = frappe._dict(json.loads(doc))

	doc['mr_items'] = []
	po_items = doc.get('po_items') if doc.get('po_items') else doc.get('items')
	company = doc.get('company')
	warehouse = doc.get('for_warehouse')

	if not ignore_existing_ordered_qty:
		ignore_existing_ordered_qty = doc.get('ignore_existing_ordered_qty')

	so_item_details = frappe._dict()
	for data in po_items:
		planned_qty = data.get('required_qty') or data.get('planned_qty')
		ignore_existing_ordered_qty = data.get('ignore_existing_ordered_qty') or ignore_existing_ordered_qty
		warehouse = data.get("warehouse") or warehouse

		item_details = {}
		if data.get("bom") or data.get("bom_no"):
			if data.get('required_qty'):
				bom_no = data.get('bom')
				include_non_stock_items = 1
				include_subcontracted_items = 1 if data.get('include_exploded_items') else 0
			else:
				bom_no = data.get('bom_no')
				include_subcontracted_items = doc.get('include_subcontracted_items')
				include_non_stock_items = doc.get('include_non_stock_items')

			if not planned_qty:
				frappe.throw(_("For row {0}: Enter Planned Qty").format(data.get('idx')))

			if bom_no:
				if data.get('include_exploded_items') and include_subcontracted_items:
					# fetch exploded items from BOM
					item_details = get_exploded_items(item_details,
						company, bom_no, include_non_stock_items, planned_qty=planned_qty)
				else:
					item_details = get_subitems(doc, data, item_details, bom_no, company,
						include_non_stock_items, include_subcontracted_items, 1, planned_qty=planned_qty)
		elif data.get('item_code'):
			item_master = frappe.get_doc('Item', data['item_code']).as_dict()
			purchase_uom = item_master.purchase_uom or item_master.stock_uom
			conversion_factor = 0
			for d in item_master.get("uoms"):
				if d.uom == purchase_uom:
					conversion_factor = d.conversion_factor

			item_details[item_master.name] = frappe._dict(
				{
					'item_name' : item_master.item_name,
					'default_bom' : doc.bom,
					'purchase_uom' : purchase_uom,
					'default_warehouse': item_master.default_warehouse,
					'min_order_qty' : item_master.min_order_qty,
					'default_material_request_type' : item_master.default_material_request_type,
					'qty': planned_qty or 1,
					'is_sub_contracted' : item_master.is_subcontracted_item,
					'item_code' : item_master.name,
					'description' : item_master.description,
					'stock_uom' : item_master.stock_uom,
					'conversion_factor' : conversion_factor,
				}
			)

		sales_order = doc.get("sales_order")

		for item_code, details in iteritems(item_details):
			so_item_details.setdefault(sales_order, frappe._dict())
			if item_code in so_item_details.get(sales_order, {}):
				so_item_details[sales_order][item_code]['qty'] = so_item_details[sales_order][item_code].get("qty", 0) + flt(details.qty)
			else:
				so_item_details[sales_order][item_code] = details

	mr_items = []
	for sales_order, item_code in iteritems(so_item_details):
		item_dict = so_item_details[sales_order]
		for details in item_dict.values():
			bin_dict = get_bin_details(details, doc.company, warehouse)
			bin_dict = bin_dict[0] if bin_dict else {}

			if details.qty > 0:
				items = get_material_request_items(details, sales_order, company,
					ignore_existing_ordered_qty, warehouse, bin_dict)
				if items:
					mr_items.append(items)

	if not mr_items:
		frappe.msgprint(_("""As the projected quantity of raw materials exceeds the required quantity, there is no need to create a material request.
			Still, if you want to make a material request, kindly enable the <b>Ignore Existing Projected Quantity</b> checkbox"""))

	return mr_items
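The so_item_details bookkeeping above is a nested accumulate-by-key pattern. In plain dicts (frappe._dict swapped out, values illustrative) it reduces to:

so_item_details = {}
rows = [("SO-0001", "ITEM-A", 2.0),
        ("SO-0001", "ITEM-A", 3.0),
        ("SO-0001", "ITEM-B", 1.0)]

for sales_order, item_code, qty in rows:
    bucket = so_item_details.setdefault(sales_order, {})
    bucket[item_code] = bucket.get(item_code, 0) + qty

print(so_item_details)
# -> {'SO-0001': {'ITEM-A': 5.0, 'ITEM-B': 1.0}}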
    def update_file_system_snapshots_with_http_info(self, name, attributes,
                                                    **kwargs):
        """
        Update an existing file system snapshot
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.update_file_system_snapshots_with_http_info(name, attributes, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str name: The name of the file system or snapshot to be updated. (required)
        :param SnapshotSuffix attributes: The new attributes; only modifiable fields may be used. (required)
        :return: FileSystemSnapshotResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """

        all_params = ['name', 'attributes']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError("Got an unexpected keyword argument '%s'"
                                " to method update_file_system_snapshots" %
                                key)
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'name' is set
        if ('name' not in params) or (params['name'] is None):
            raise ValueError(
                "Missing the required parameter `name` when calling `update_file_system_snapshots`"
            )
        # verify the required parameter 'attributes' is set
        if ('attributes' not in params) or (params['attributes'] is None):
            raise ValueError(
                "Missing the required parameter `attributes` when calling `update_file_system_snapshots`"
            )

        collection_formats = {}

        path_params = {}

        query_params = []
        if 'name' in params:
            query_params.append(('name', params['name']))

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'attributes' in params:
            body_params = params['attributes']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])

        # Authentication setting
        auth_settings = ['AuthTokenHeader']

        return self.api_client.call_api(
            '/1.2/file-system-snapshots',
            'PATCH',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='FileSystemSnapshotResponse',
            auth_settings=auth_settings,
            callback=params.get('callback'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
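A hypothetical call (the client construction and SnapshotSuffix arguments are assumptions): note that `name` travels as a query parameter while `attributes` is serialized as the JSON body of the PATCH:

attributes = SnapshotSuffix(suffix='weekly')  # hypothetical constructor args
resp = api.update_file_system_snapshots_with_http_info(
    'fs1.suffix-1', attributes)
# -> PATCH /1.2/file-system-snapshots?name=fs1.suffix-1 with a JSON body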
Example #48
0
def convert_examples_to_features(examples, tokenizer, max_seq_length,
                                 doc_stride, max_query_length, is_training,
                                 output_fn):
    """Loads a data file into a list of `InputBatch`s."""

    unique_id = 1000000000

    for (example_index, example) in enumerate(examples):
        query_tokens = tokenizer.tokenize(example.question_text)

        if len(query_tokens) > max_query_length:
            query_tokens = query_tokens[0:max_query_length]

        tok_to_orig_index = []
        orig_to_tok_index = []
        all_doc_tokens = []
        for (i, token) in enumerate(example.doc_tokens):
            orig_to_tok_index.append(len(all_doc_tokens))
            sub_tokens = tokenizer.tokenize(token)
            for sub_token in sub_tokens:
                tok_to_orig_index.append(i)
                all_doc_tokens.append(sub_token)

        tok_start_position = None
        tok_end_position = None
        if is_training:
            tok_start_position = orig_to_tok_index[example.start_position]
            if example.end_position < len(example.doc_tokens) - 1:
                tok_end_position = orig_to_tok_index[example.end_position +
                                                     1] - 1
            else:
                tok_end_position = len(all_doc_tokens) - 1
            (tok_start_position, tok_end_position) = _improve_answer_span(
                all_doc_tokens, tok_start_position, tok_end_position,
                tokenizer, example.orig_answer_text)

        # The -3 accounts for [CLS], [SEP] and [SEP]
        max_tokens_for_doc = max_seq_length - len(query_tokens) - 3

        # We can have documents that are longer than the maximum sequence length.
        # To deal with this we do a sliding window approach, where we take chunks
        # of up to our max length with a stride of `doc_stride`.
        _DocSpan = collections.namedtuple(  # pylint: disable=invalid-name
            "DocSpan", ["start", "length"])
        doc_spans = []
        start_offset = 0
        while start_offset < len(all_doc_tokens):
            length = len(all_doc_tokens) - start_offset
            if length > max_tokens_for_doc:
                length = max_tokens_for_doc
            doc_spans.append(_DocSpan(start=start_offset, length=length))
            if start_offset + length == len(all_doc_tokens):
                break
            start_offset += min(length, doc_stride)

        for (doc_span_index, doc_span) in enumerate(doc_spans):
            tokens = []
            token_to_orig_map = {}
            token_is_max_context = {}
            segment_ids = []
            tokens.append("[CLS]")
            segment_ids.append(0)
            for token in query_tokens:
                tokens.append(token)
                segment_ids.append(0)
            tokens.append("[SEP]")
            segment_ids.append(0)

            for i in range(doc_span.length):
                split_token_index = doc_span.start + i
                token_to_orig_map[len(
                    tokens)] = tok_to_orig_index[split_token_index]

                is_max_context = _check_is_max_context(doc_spans,
                                                       doc_span_index,
                                                       split_token_index)
                token_is_max_context[len(tokens)] = is_max_context
                tokens.append(all_doc_tokens[split_token_index])
                segment_ids.append(1)
            tokens.append("[SEP]")
            segment_ids.append(1)

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1] * len(input_ids)

            # Zero-pad up to the sequence length.
            while len(input_ids) < max_seq_length:
                input_ids.append(0)
                input_mask.append(0)
                segment_ids.append(0)

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length

            start_position = None
            end_position = None
            if is_training:
                # For training, if our document chunk does not contain an annotation
                # we throw it out, since there is nothing to predict.
                doc_start = doc_span.start
                doc_end = doc_span.start + doc_span.length - 1
                if (example.start_position < doc_start
                        or example.end_position < doc_start
                        or example.start_position > doc_end
                        or example.end_position > doc_end):
                    continue

                doc_offset = len(query_tokens) + 2
                start_position = tok_start_position - doc_start + doc_offset
                end_position = tok_end_position - doc_start + doc_offset

            if example_index < 20:
                tf.logging.info("*** Example ***")
                tf.logging.info("unique_id: %s" % (unique_id))
                tf.logging.info("example_index: %s" % (example_index))
                tf.logging.info("doc_span_index: %s" % (doc_span_index))
                tf.logging.info(
                    "tokens: %s" %
                    " ".join([tokenization.printable_text(x) for x in tokens]))
                tf.logging.info("token_to_orig_map: %s" % " ".join([
                    "%d:%d" % (x, y)
                    for (x, y) in six.iteritems(token_to_orig_map)
                ]))
                tf.logging.info("token_is_max_context: %s" % " ".join([
                    "%d:%s" % (x, y)
                    for (x, y) in six.iteritems(token_is_max_context)
                ]))
                tf.logging.info("input_ids: %s" %
                                " ".join([str(x) for x in input_ids]))
                tf.logging.info("input_mask: %s" %
                                " ".join([str(x) for x in input_mask]))
                tf.logging.info("segment_ids: %s" %
                                " ".join([str(x) for x in segment_ids]))
                if is_training:
                    answer_text = " ".join(
                        tokens[start_position:(end_position + 1)])
                    tf.logging.info("start_position: %d" % (start_position))
                    tf.logging.info("end_position: %d" % (end_position))
                    tf.logging.info("answer: %s" %
                                    (tokenization.printable_text(answer_text)))

            feature = InputFeatures(unique_id=unique_id,
                                    example_index=example_index,
                                    doc_span_index=doc_span_index,
                                    tokens=tokens,
                                    token_to_orig_map=token_to_orig_map,
                                    token_is_max_context=token_is_max_context,
                                    input_ids=input_ids,
                                    input_mask=input_mask,
                                    segment_ids=segment_ids,
                                    start_position=start_position,
                                    end_position=end_position)

            # Run callback
            output_fn(feature)

            unique_id += 1
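The sliding-window logic above is worth isolating. A self-contained sketch of just the span computation (same algorithm, standalone names):

import collections

_DocSpan = collections.namedtuple("DocSpan", ["start", "length"])


def make_doc_spans(num_tokens, max_tokens_for_doc, doc_stride):
    """Cover num_tokens with windows of at most max_tokens_for_doc tokens,
    advancing by doc_stride tokens between windows."""
    spans, start = [], 0
    while start < num_tokens:
        length = min(num_tokens - start, max_tokens_for_doc)
        spans.append(_DocSpan(start=start, length=length))
        if start + length == num_tokens:
            break
        start += min(length, doc_stride)
    return spans


print(make_doc_spans(10, 4, 2))
# -> [DocSpan(start=0, length=4), DocSpan(start=2, length=4),
#     DocSpan(start=4, length=4), DocSpan(start=6, length=4)]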
Example #49
0
    def get_user_profile_in_workspace_with_http_info(self, workspace_id,
                                                     **kwargs):
        """
        User-related profile in workspace
        Users can be invited under an account by the administrator, and all resources (e.g. resources, networks, blueprints, credentials, clusters) can be shared across account users
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.get_user_profile_in_workspace_with_http_info(workspace_id, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param int workspace_id: (required)
        :return: UserProfileResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """

        all_params = ['workspace_id']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError("Got an unexpected keyword argument '%s'"
                                " to method get_user_profile_in_workspace" %
                                key)
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'workspace_id' is set
        if ('workspace_id' not in params) or (params['workspace_id'] is None):
            raise ValueError(
                "Missing the required parameter `workspace_id` when calling `get_user_profile_in_workspace`"
            )

        collection_formats = {}

        path_params = {}
        if 'workspace_id' in params:
            path_params['workspaceId'] = params['workspace_id']

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])

        # Authentication setting
        auth_settings = ['tokenAuth']

        return self.api_client.call_api(
            '/v3/{workspaceId}/users/profile',
            'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='UserProfileResponse',
            auth_settings=auth_settings,
            callback=params.get('callback'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
    def create_file_system_snapshots_with_http_info(self, sources, **kwargs):
        """
        Create snapshots for the specified source file systems
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.create_file_system_snapshots_with_http_info(sources, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param list[str] sources: A list of names of source file systems. (required)
        :param SnapshotSuffix suffix: the suffix of the snapshot
        :return: FileSystemSnapshotResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """

        all_params = ['sources', 'suffix']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError("Got an unexpected keyword argument '%s'"
                                " to method create_file_system_snapshots" %
                                key)
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'sources' is set
        if ('sources' not in params) or (params['sources'] is None):
            raise ValueError(
                "Missing the required parameter `sources` when calling `create_file_system_snapshots`"
            )

        collection_formats = {}

        path_params = {}

        query_params = []
        if 'sources' in params:
            query_params.append(('sources', params['sources']))
            collection_formats['sources'] = 'csv'

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'suffix' in params:
            body_params = params['suffix']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])

        # Authentication setting
        auth_settings = ['AuthTokenHeader']

        return self.api_client.call_api(
            '/1.2/file-system-snapshots',
            'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='FileSystemSnapshotResponse',
            auth_settings=auth_settings,
            callback=params.get('callback'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
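The one twist here relative to the earlier methods is collection_formats['sources'] = 'csv'. What that typically produces on the wire (the exact serialization is the client's responsibility):

sources = ['fs1', 'fs2', 'fs3']
# With the 'csv' collection format, the list collapses to one query value:
print('sources=' + ','.join(sources))
# -> sources=fs1,fs2,fs3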
Example #51
0
    def list_secret_keys_with_http_info(self, app_id, **kwargs):
        """
        List the secret keys for the specified app.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.list_secret_keys_with_http_info(app_id, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str app_id: Identifies the app. (required)
        :return: ListSecretKeysResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """

        all_params = ['app_id']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method list_secret_keys" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'app_id' is set
        if ('app_id' not in params) or (params['app_id'] is None):
            raise ValueError("Missing the required parameter `app_id` when calling `list_secret_keys`")


        collection_formats = {}

        path_params = {}
        if 'app_id' in params:
            path_params['appId'] = params['app_id']

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])

        # Authentication setting
        auth_settings = ['basicAuth', 'jwt']

        return self.api_client.call_api('/v1.1/apps/{appId}/keys', 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='ListSecretKeysResponse',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
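    # A hedged aside on the shared private flags above: `_return_http_data_only`
    # is typically honored by `call_api` so that only the deserialized model is
    # returned, dropping the HTTP status and headers, e.g. (hypothetical call):
    #
    #     keys = api.list_secret_keys_with_http_info(
    #         'my-app-id', _return_http_data_only=True)
    #
    # `_preload_content=False` likewise typically returns the raw HTTP response
    # object without deserializing it.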
    def list_file_system_snapshots_with_http_info(self, **kwargs):
        """
        List file system snapshots
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.list_file_system_snapshots_with_http_info(callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str filter: The filter to be used for the query.
        :param str sort: The way to order the results.
        :param int start: The offset at which to start returning results.
        :param int limit: The maximum number of items to return; must be >= 0.
        :param str token: An opaque token used to continue a previous listing request.
        :param bool total: Return a total object in addition to the other results.
        :param bool total_only: Return only the total object.
        :param list[str] names_or_sources: A comma-separated list of resource names, each either the name of a snapshot or of its source file system.
        :return: FileSystemSnapshotResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """

        all_params = [
            'filter', 'sort', 'start', 'limit', 'token', 'total', 'total_only',
            'names_or_sources'
        ]
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError("Got an unexpected keyword argument '%s'"
                                " to method list_file_system_snapshots" % key)
            params[key] = val
        del params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []
        if 'filter' in params:
            query_params.append(('filter', params['filter']))
        if 'sort' in params:
            query_params.append(('sort', params['sort']))
        if 'start' in params:
            query_params.append(('start', params['start']))
        if 'limit' in params:
            query_params.append(('limit', params['limit']))
        if 'token' in params:
            query_params.append(('token', params['token']))
        if 'total' in params:
            query_params.append(('total', params['total']))
        if 'total_only' in params:
            query_params.append(('total_only', params['total_only']))
        if 'names_or_sources' in params:
            query_params.append(
                ('names_or_sources', params['names_or_sources']))
            collection_formats['names_or_sources'] = 'csv'

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])

        # Authentication setting
        auth_settings = ['AuthTokenHeader']

        return self.api_client.call_api(
            '/1.2/file-system-snapshots',
            'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='FileSystemSnapshotResponse',
            auth_settings=auth_settings,
            callback=params.get('callback'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
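Each optional parameter above becomes a query-string pair only when supplied, as the method body shows. A hedged sketch of a filtered, sorted, size-limited listing (the filter and sort syntax are service-specific; the values here are illustrative only):

snapshots = api.list_file_system_snapshots_with_http_info(
    filter="source='fs1'",  # illustrative filter expression
    sort='name',            # order results by snapshot name
    limit=10)               # return at most 10 items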
Example #53
0
 def selected_key(self):
     # Return the key whose display text matches the current combo-box text.
     for key, value in six.iteritems(self.values):
         if value == six.text_type(self.currentText()).strip():
             return key
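The method above inverts a key-to-display-text mapping by scanning it. A self-contained sketch of the same reverse-lookup pattern, with a plain dict standing in for the Qt combo box:

import six

def selected_key(values, current_text):
    # Return the key whose display text matches the current selection.
    for key, value in six.iteritems(values):
        if value == current_text.strip():
            return key

assert selected_key({1: 'Red', 2: 'Green', 3: 'Blue'}, ' Green ') == 2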
Example #54
0
    def from_image(cls, context, image_id, image_meta):
        """Returns VMwareImage, the subset of properties the driver uses.

        :param context: The request context.
        :param image_id: The id of the image to load properties from.
        :param image_meta: The image metadata object we are working with.
        :return: vmware image object
        :rtype: nova.virt.vmwareapi.images.VmwareImage
        """
        properties = image_meta.properties

        # Calculate the linked_clone flag; allow image properties to override
        # the global default set in the configuration.
        image_linked_clone = properties.get('img_linked_clone',
                                            CONF.vmware.use_linked_clone)

        # catch any string values that need to be interpreted as boolean values
        linked_clone = strutils.bool_from_string(image_linked_clone)

        if image_meta.obj_attr_is_set('container_format'):
            container_format = image_meta.container_format
        else:
            container_format = None

        props = {
            'image_id': image_id,
            'linked_clone': linked_clone,
            'container_format': container_format,
            'vsphere_location': get_vsphere_location(context, image_id)
        }

        if image_meta.obj_attr_is_set('size'):
            props['file_size'] = image_meta.size
        if image_meta.obj_attr_is_set('disk_format'):
            props['file_type'] = image_meta.disk_format
        hw_disk_bus = properties.get('hw_disk_bus')
        if hw_disk_bus:
            mapping = {
                fields.SCSIModel.LSILOGIC: constants.DEFAULT_ADAPTER_TYPE,
                fields.SCSIModel.LSISAS1068:
                constants.ADAPTER_TYPE_LSILOGICSAS,
                fields.SCSIModel.BUSLOGIC: constants.ADAPTER_TYPE_BUSLOGIC,
                fields.SCSIModel.VMPVSCSI: constants.ADAPTER_TYPE_PARAVIRTUAL,
            }
            if hw_disk_bus == fields.DiskBus.IDE:
                props['adapter_type'] = constants.ADAPTER_TYPE_IDE
            elif hw_disk_bus == fields.DiskBus.SCSI:
                hw_scsi_model = properties.get('hw_scsi_model')
                props['adapter_type'] = mapping.get(hw_scsi_model)

        props_map = {
            'os_distro': 'os_type',
            'hw_disk_type': 'disk_type',
            'hw_vif_model': 'vif_model'
        }

        for k, v in six.iteritems(props_map):
            if properties.obj_attr_is_set(k):
                props[v] = properties.get(k)

        return cls(**props)
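    # A minimal sketch of the override logic above, under stated assumptions
    # (the property dict and default are stand-ins for image_meta.properties
    # and CONF.vmware.use_linked_clone): the per-image 'img_linked_clone'
    # property wins over the global default, and oslo's bool_from_string
    # coerces string forms such as 'yes'/'no' to booleans.
    #
    #     from oslo_utils import strutils
    #     properties = {'img_linked_clone': 'yes'}   # hypothetical image props
    #     default = False                            # stand-in for CONF value
    #     flag = properties.get('img_linked_clone', default)
    #     assert strutils.bool_from_string(flag) is True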
    def modify_directory_list_with_http_info(self, directory_list_id, directory_list, api_version, **kwargs):  # noqa: E501
        """Modify a Directory List  # noqa: E501

        Modify a directory list by ID. Any unset elements will be left unchanged.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.modify_directory_list_with_http_info(directory_list_id, directory_list, api_version, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int directory_list_id: The ID number of the directory list to modify. (required)
        :param DirectoryList directory_list: The settings of the directory list to modify. (required)
        :param str api_version: The version of the api being called. (required)
        :return: DirectoryList
                 If the method is called asynchronously,
                 returns the request thread.
        """

        all_params = ['directory_list_id', 'directory_list', 'api_version']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method modify_directory_list" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'directory_list_id' is set
        if ('directory_list_id' not in params or
                params['directory_list_id'] is None):
            raise ValueError("Missing the required parameter `directory_list_id` when calling `modify_directory_list`")  # noqa: E501
        # verify the required parameter 'directory_list' is set
        if ('directory_list' not in params or
                params['directory_list'] is None):
            raise ValueError("Missing the required parameter `directory_list` when calling `modify_directory_list`")  # noqa: E501
        # verify the required parameter 'api_version' is set
        if ('api_version' not in params or
                params['api_version'] is None):
            raise ValueError("Missing the required parameter `api_version` when calling `modify_directory_list`")  # noqa: E501

        if 'directory_list_id' in params and not re.search('\\d+', str(params['directory_list_id'])):  # noqa: E501
            raise ValueError("Invalid value for parameter `directory_list_id` when calling `modify_directory_list`, must conform to the pattern `/\\d+/`")  # noqa: E501
        collection_formats = {}

        path_params = {}
        if 'directory_list_id' in params:
            path_params['directoryListID'] = params['directory_list_id']  # noqa: E501

        query_params = []

        header_params = {}
        if 'api_version' in params:
            header_params['api-version'] = params['api_version']  # noqa: E501

        form_params = []
        local_var_files = {}

        body_params = None
        if 'directory_list' in params:
            body_params = params['directory_list']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['DefaultAuthentication']  # noqa: E501

        return self.api_client.call_api(
            '/directorylists/{directoryListID}', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='DirectoryList',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
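This newer generator style replaces the `callback` parameter with an `async_req` flag; the returned thread's `get()` yields the same value the synchronous call would. A minimal sketch, assuming a configured `api` instance and an existing `DirectoryList` model named `directory_list`:

thread = api.modify_directory_list_with_http_info(
    42,              # hypothetical directory list ID
    directory_list,  # the fields to change; unset fields stay as they are
    'v1',            # hypothetical api-version string
    async_req=True)
modified = thread.get()  # blocks; re-raises any request error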
Example #56
0
    def list_apps_with_http_info(self, **kwargs):
        """
        List all configured apps.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.list_apps_with_http_info(callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param int limit: The number of records to return.
        :param int offset: The number of initial records to skip before picking records to return.
        :param str service_account_id: The service account ID for which to list apps.
        :return: ListAppsResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """

        all_params = ['limit', 'offset', 'service_account_id']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method list_apps" % key
                )
            params[key] = val
        del params['kwargs']


        collection_formats = {}

        path_params = {}

        query_params = []
        if 'limit' in params:
            query_params.append(('limit', params['limit']))
        if 'offset' in params:
            query_params.append(('offset', params['offset']))
        if 'service_account_id' in params:
            query_params.append(('serviceAccountId', params['service_account_id']))

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])

        # Authentication setting
        auth_settings = ['basicAuth', 'jwt']

        return self.api_client.call_api('/v1.1/apps', 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='ListAppsResponse',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
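Because `limit` and `offset` map directly onto query parameters, paging through all apps is a loop that advances the offset. A hedged sketch (the page size, the response's `items` attribute, and `process` are assumptions, not part of this API):

page_size = 50
offset = 0
while True:
    page = api.list_apps_with_http_info(limit=page_size, offset=offset)
    items = getattr(page, 'items', None) or []  # response shape assumed
    if not items:
        break
    for app in items:
        process(app)  # hypothetical per-app handler
    offset += page_size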
Example #57
0
    def update(self, current, values=None, force=False):
        """Update progress bar, and print to standard output if `force`
    is True, or the last update was completed longer than `interval`
    amount of time ago, or `current` >= `target`.

    The written output is the progress bar and all unique values.

    Args:
      current: int.
        Index of current step.
      values: dict of str to float.
        Dict of name by value-for-last-step. The progress bar
        will display averages for these values.
      force: bool.
        Whether to force visual progress update.
    """
        if values is None:
            values = {}

        for k, v in six.iteritems(values):
            self.stored_values[k] = v

        self.seen_so_far = current

        now = time.time()
        if (not force and (now - self.last_update) < self.interval
                and current < self.target):
            return

        self.last_update = now
        if self.verbose == 0:
            return

        prev_total_width = self.total_width
        sys.stdout.write("\b" * prev_total_width)
        sys.stdout.write("\r")

        # Write progress bar to stdout.
        n_digits = len(str(self.target))
        bar = '%%%dd/%%%dd' % (n_digits, n_digits) % (current, self.target)
        # Use float division so the percentage is correct under Python 2.
        bar += ' [{0}%]'.format(
            str(int(float(current) / self.target * 100)).rjust(3))
        bar += ' '
        prog_width = int(self.width * float(current) / self.target)
        if prog_width > 0:
            bar += ('█' * prog_width)

        bar += (' ' * (self.width - prog_width))
        sys.stdout.write(bar)

        # Write values to stdout.
        if current:
            time_per_unit = (now - self.start) / current
        else:
            time_per_unit = 0

        eta = time_per_unit * (self.target - current)
        info = ''
        if current < self.target:
            info += ' ETA: %ds' % eta
        else:
            info += ' Elapsed: %ds' % (now - self.start)

        for k, v in six.iteritems(self.stored_values):
            info += ' | {0:s}: {1:0.3f}'.format(k, v)

        self.total_width = len(bar) + len(info)
        if prev_total_width > self.total_width:
            info += ((prev_total_width - self.total_width) * " ")

        sys.stdout.write(info)
        sys.stdout.flush()

        if current >= self.target:
            sys.stdout.write("\n")
    def search_directory_lists_with_http_info(self, api_version, **kwargs):  # noqa: E501
        """Search Directory Lists  # noqa: E501

        Search for directory lists using optional filters.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.search_directory_lists_with_http_info(api_version, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str api_version: The version of the api being called. (required)
        :param SearchFilter search_filter: A collection of options used to filter the search results.
        :return: DirectoryLists
                 If the method is called asynchronously,
                 returns the request thread.
        """

        all_params = ['api_version', 'search_filter']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method search_directory_lists" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'api_version' is set
        if ('api_version' not in params or
                params['api_version'] is None):
            raise ValueError("Missing the required parameter `api_version` when calling `search_directory_lists`")  # noqa: E501

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}
        if 'api_version' in params:
            header_params['api-version'] = params['api_version']  # noqa: E501

        form_params = []
        local_var_files = {}

        body_params = None
        if 'search_filter' in params:
            body_params = params['search_filter']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['DefaultAuthentication']  # noqa: E501

        return self.api_client.call_api(
            '/directorylists/search', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='DirectoryLists',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
Example #59
0
    def make_vm(self, nodes, thunks,
                input_storage, output_storage, storage_map,
                post_thunk_clear,
                computed,
                compute_map,
                updated_vars,
                ):

        pre_call_clear = [storage_map[v] for v in self.no_recycling]

        if (self.callback is not None or self.callback_input is not None or
                (config.profile and config.profile_memory) or
                (self.allow_partial_eval and not self.use_cloop)):

            if self.use_cloop and (self.callback is not None or
                                   self.callback_input is not None):
                logger.warning('CVM does not support callback, using Stack VM.')
            if self.use_cloop and config.profile_memory:
                warnings.warn(
                    'CVM does not support memory profile, using Stack VM.')
            if not self.use_cloop and self.allow_partial_eval:
                warnings.warn(
                    'LoopGC does not support partial evaluation, '
                    'using Stack VM.')
            # Needed for allow_gc=True, profiling and storage_map reuse
            deps = self.compute_gc_dependencies(storage_map)
            vm = Stack(
                nodes, thunks, pre_call_clear,
                storage_map, compute_map,
                self.fgraph, self.allow_gc,
                len(updated_vars),
                dependencies=deps,
                callback=self.callback,
                callback_input=self.callback_input)
        elif self.use_cloop:
            # create a map from nodes to ints and vars to ints
            nodes_idx = {}
            vars_idx = {}
            for i, node in enumerate(nodes):
                nodes_idx[node] = i
                for v in node.inputs + node.outputs:
                    vars_idx.setdefault(v, len(vars_idx))
            for v in self.fgraph.inputs + self.fgraph.outputs:
                vars_idx.setdefault(v, len(vars_idx))

            nodes_idx_inv = {}
            vars_idx_inv = {}
            for (node, i) in iteritems(nodes_idx):
                nodes_idx_inv[i] = node
            for (var, i) in iteritems(vars_idx):
                vars_idx_inv[i] = var

            # put storage_map and compute_map into a int-based scheme
            storage_map_list = [storage_map[vars_idx_inv[i]]
                                for i in xrange(len(vars_idx_inv))]
            compute_map_list = [compute_map[vars_idx_inv[i]]
                                for i in xrange(len(vars_idx_inv))]
            if nodes:
                assert type(storage_map_list[0]) is list
                assert type(compute_map_list[0]) is list

            # Needed for allow_gc=True, profiling and storage_map reuse
            dependency_map = self.compute_gc_dependencies(storage_map)
            dependency_map_list = [
                [vars_idx[d] for d in dependency_map[vars_idx_inv[i]]]
                for i in xrange(len(vars_idx_inv))]

            # build the pointers to node inputs and offsets
            base_input_output_list = []
            node_n_inputs = []
            node_n_outputs = []
            node_input_offset = []
            node_output_offset = []
            for node in nodes:
                inputs_idx = [vars_idx[v] for v in node.inputs]
                outputs_idx = [vars_idx[v] for v in node.outputs]
                node_n_inputs.append(len(inputs_idx))
                node_n_outputs.append(len(outputs_idx))
                node_input_offset.append(len(base_input_output_list))
                base_input_output_list.extend(inputs_idx)
                node_output_offset.append(len(base_input_output_list))
                base_input_output_list.extend(outputs_idx)

            # build the var owner array
            var_owner = [None] * len(vars_idx)
            for (var, i) in iteritems(vars_idx):
                if var.owner:
                    var_owner[i] = nodes_idx[var.owner]

            is_lazy_list = [int(th.lazy) for th in thunks]
            output_vars = [vars_idx[v] for v in self.fgraph.outputs]

            # builds the list of prereqs induced by e.g. destroy_handler
            ords = self.fgraph.orderings()
            node_prereqs = []
            node_output_size = []
            for i, node in enumerate(nodes):
                node_output_size.append(0)
                prereq_var_idxs = []
                for prereq_node in ords.get(node, []):
                    prereq_var_idxs.extend(
                        [vars_idx[v] for v in prereq_node.outputs])
                prereq_var_idxs = list(set(prereq_var_idxs))
                prereq_var_idxs.sort()  # TODO: why sort?
                node_prereqs.append(prereq_var_idxs)

            # Builds the list of input storage to update (according to update
            # rules) when the outputs are computed.
            # They are in the same order as the second part of output_vars
            # (output_vars contains first the returned outputs, then the
            # values of the update expressions).
            update_storage = []
            update_in_from_out = {}
            for (ivar, ovar) in iteritems(updated_vars):
                update_in_from_out[vars_idx[ovar]] = vars_idx[ivar]
            for oidx in output_vars:
                if oidx in update_in_from_out:
                    update_storage.append(update_in_from_out[oidx])

            # Snapshot the refcount so we can assert below that constructing
            # the CVM did not leak a reference to its arguments.
            c0 = sys.getrefcount(node_n_inputs)
            vm = CVM(
                nodes,
                thunks,
                pre_call_clear,
                allow_gc=self.allow_gc,
                call_counts=[0] * len(nodes),
                call_times=[0.0] * len(nodes),
                compute_map_list=compute_map_list,
                storage_map_list=storage_map_list,
                base_input_output_list=base_input_output_list,
                node_n_inputs=node_n_inputs,
                node_n_outputs=node_n_outputs,
                node_input_offset=node_input_offset,
                node_output_offset=node_output_offset,
                var_owner=var_owner,
                is_lazy_list=is_lazy_list,
                output_vars=output_vars,
                node_prereqs=node_prereqs,
                node_output_size=node_output_size,
                update_storage=update_storage,
                dependencies=dependency_map_list,
            )
            assert c0 == sys.getrefcount(node_n_inputs)
        else:
            lazy = self.lazy
            if lazy is None:
                lazy = config.vm.lazy
            if lazy is None:
                lazy = not all([(not th.lazy) for th in thunks])
            if not lazy:
                # there is no conditional in the graph
                if self.allow_gc:
                    vm = LoopGC(
                        nodes,
                        thunks,
                        pre_call_clear,
                        post_thunk_clear,
                    )
                else:
                    vm = Loop(
                        nodes,
                        thunks,
                        pre_call_clear,
                    )
            else:
                # Needed when allow_gc=True and profiling
                deps = self.compute_gc_dependencies(storage_map)
                vm = Stack(
                    nodes, thunks, pre_call_clear,
                    storage_map, compute_map,
                    self.fgraph, self.allow_gc,
                    len(updated_vars),
                    dependencies=deps,
                )
        return vm
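The CVM branch above works almost entirely in integer space: every node and variable is interned into a dense index, and the inverse maps let flat lists stand in for dicts. A stripped-down, self-contained sketch of that interning pattern:

def intern_all(objects):
    # Assign each distinct object a dense integer id, then invert the map.
    idx = {}
    for obj in objects:
        idx.setdefault(obj, len(idx))
    inv = dict((i, obj) for obj, i in idx.items())
    return idx, inv

idx, inv = intern_all(['a', 'b', 'a', 'c'])
assert idx == {'a': 0, 'b': 1, 'c': 2}
assert [inv[i] for i in range(len(inv))] == ['a', 'b', 'c']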
Example #60
0
 def __iter__(self):
     # Yield (name, value) pairs for public (non-underscore) attributes.
     for key, value in six.iteritems(self.__dict__):
         if not key.startswith('_'):
             yield key, value
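Defining `__iter__` this way makes the object directly consumable by `dict()`, exposing only its public attributes. A small self-contained sketch of the pattern:

import six

class Record(object):
    def __init__(self):
        self.name = 'example'
        self._secret = 'hidden'  # skipped: leading underscore

    def __iter__(self):
        for key, value in six.iteritems(self.__dict__):
            if not key.startswith('_'):
                yield key, value

assert dict(Record()) == {'name': 'example'}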