Example #1
1
 def serialize(self, buff):
     """
     Serialize this message's fields into *buff*.

     :param buff: output buffer, ``StringIO``
     """
     try:
         # String fields are written as a little-endian uint32 length prefix
         # followed by the UTF-8 encoded bytes.
         _x = self.agent_one
         length = len(_x)
         if python3 or type(_x) == unicode:
             _x = _x.encode("utf-8")
             length = len(_x)  # recompute: byte length may differ after encoding
         buff.write(struct.pack("<I%ss" % length, length, _x))
         _x = self.agent_two
         length = len(_x)
         if python3 or type(_x) == unicode:
             _x = _x.encode("utf-8")
             length = len(_x)
         buff.write(struct.pack("<I%ss" % length, length, _x))
         # Fixed-size numeric fields packed in one call via the precompiled
         # _struct_6d (presumably six doubles, per its name -- defined elsewhere).
         _x = self
         buff.write(
             _struct_6d.pack(_x.rotationx, _x.rotationy, _x.angle, _x.referencex, _x.referencey, _x.evaluation)
         )
     except struct.error as se:
         # Delegate to _check_types so the caller gets field/type context.
         self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
     except TypeError as te:
         self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
Example #2
0
 def send(self,stanza):
     """ Serialise stanza and put it on the wire. Assign an unique ID to it before send.
         Returns assigned ID."""
     # Plain (already-serialised) strings bypass all processing below.
     if type(stanza) in [type(''), type(u'')]: return self._owner_send(stanza)
     if not isinstance(stanza,Protocol): _ID=None
     elif not stanza.getID():
         # Allocate the next ID from the module-level counter.
         global ID
         ID+=1
         _ID=`ID`
         stanza.setID(_ID)
     else: _ID=stanza.getID()
     # Stamp our registered name as 'from' unless the stanza already has one.
     if self._owner._registered_name and not stanza.getAttr('from'): stanza.setAttr('from',self._owner._registered_name)
     if self._owner._route and stanza.getName()!='bind':
         # Routing mode: wrap the stanza in a 'route' envelope addressed by
         # domain ('bind' stanzas are deliberately not wrapped).
         to=self._owner.Server
         if stanza.getTo() and stanza.getTo().getDomain():
             to=stanza.getTo().getDomain()
         frm=stanza.getFrom()
         if frm.getDomain():
             frm=frm.getDomain()
         route=Protocol('route',to=to,frm=frm,payload=[stanza])
         stanza=route
     stanza.setNamespace(self._owner.Namespace)
     stanza.setParent(self._metastream)
     self._owner_send(stanza)
     return _ID
def formatTemplate(line):
    """Substitute ``$name$`` placeholders in *line*.

    Scans *line* for '$'-delimited placeholder names and replaces each
    occurrence of ``$name$`` with the value ``templateDict[name]`` rendered
    via ``"%d"`` (values are expected to be integers).

    :param line: text possibly containing ``$name$`` markers
    :returns: *line* with every complete placeholder pair substituted

    Fixes: removed leftover debug ``print(type(...))`` calls and dead
    commented-out code; uses instance string methods instead of the
    unbound ``str.find`` / ``str.replace`` forms.
    """
    # Collect the positions of every '$'; consecutive pairs delimit a name.
    dollar_positions = []
    pos = line.find("$")
    while pos != -1:
        dollar_positions.append(pos)
        pos = line.find("$", pos + 1)

    # An unmatched trailing '$' is ignored, as before (zip truncates).
    template_names = [line[start + 1:end]
                      for start, end in zip(dollar_positions[::2],
                                            dollar_positions[1::2])]

    for template in template_names:
        placeholder = "${0}$".format(template)
        # templateDict is a module-level mapping defined elsewhere in the file.
        line = line.replace(placeholder, "%d" % (templateDict[template]))

    return line
Example #4
0
def showOverlap(mode, modes, *args, **kwargs):
    """Show overlap :func:`~matplotlib.pyplot.bar`.

    :arg mode: a single mode/vector
    :type mode: :class:`.Mode`, :class:`.Vector`
    :arg modes: multiple modes
    :type modes: :class:`.ModeSet`, :class:`.ANM`, :class:`.GNM`, :class:`.PCA`
    """

    import matplotlib.pyplot as plt
    # Validate argument types before any computation or plotting.
    if not isinstance(mode, (Mode, Vector)):
        raise TypeError('mode must be Mode or Vector, not {0}'
                        .format(type(mode)))
    if not isinstance(modes, (NMA, ModeSet)):
        raise TypeError('modes must be NMA or ModeSet, not {0}'
                        .format(type(modes)))
    overlap = abs(calcOverlap(mode, modes))
    # Bar centers: sequential positions for a full model, otherwise the
    # subset's own mode indices.
    if isinstance(modes, NMA):
        xcoords = np.arange(0.5, len(modes)+0.5)
    else:
        xcoords = modes.getIndices() + 0.5
    bars = plt.bar(xcoords, overlap, *args, **kwargs)
    plt.title('Overlap with {0}'.format(str(mode)))
    plt.xlabel('{0} mode index'.format(modes))
    plt.ylabel('Overlap')
    if SETTINGS['auto_show']:
        showFigure()
    return bars
Example #5
0
    def _add_delta(self, delta):
        """
        Add a timedelta-like, Tick, or TimedeltaIndex-like object
        to self.

        Parameters
        ----------
        delta : timedelta, np.timedelta64, Tick, TimedeltaArray, TimedeltaIndex

        Returns
        -------
        result : same type as self

        Notes
        -----
        The result's name is set outside of _add_delta by the calling
        method (__add__ or __sub__)
        """
        if isinstance(delta, (Tick, timedelta, np.timedelta64)):
            # Scalar offset: add one timedelta to every element.
            new_values = self._add_delta_td(delta)
        elif isinstance(delta, TimedeltaArrayMixin):
            # Array of timedeltas: element-wise addition.
            new_values = self._add_delta_tdi(delta)
        elif is_timedelta64_dtype(delta):
            # ndarray[timedelta64] --> wrap in TimedeltaArray/Index
            delta = type(self)(delta)
            new_values = self._add_delta_tdi(delta)
        else:
            raise TypeError("cannot add the type {0} to a TimedeltaIndex"
                            .format(type(delta)))

        # Re-infer frequency: the shifted values may not keep self's freq.
        return type(self)(new_values, freq='infer')
Example #6
0
def showCumulOverlap(mode, modes, *args, **kwargs):
    """Show cumulative overlap using :func:`~matplotlib.pyplot.plot`.

    :arg mode: a single mode/vector
    :type mode: :class:`.Mode`, :class:`.Vector`
    :arg modes: multiple modes
    :type modes: :class:`.ModeSet`, :class:`.ANM`, :class:`.GNM`, :class:`.PCA`
    """

    import matplotlib.pyplot as plt
    if not isinstance(mode, (Mode, Vector)):
        # Message fixed to match the check (previously advertised NMA/ModeSet,
        # which are not accepted for this argument).
        raise TypeError('mode must be Mode or Vector, not {0}'
                        .format(type(mode)))
    if not isinstance(modes, (NMA, ModeSet)):
        # Message fixed to match the check (previously listed Mode, which is
        # not accepted here); now consistent with showOverlap.
        raise TypeError('modes must be NMA or ModeSet, not {0}'
                        .format(type(modes)))
    # Cumulative overlap: square root of the running sum of squared overlaps.
    cumov = (calcOverlap(mode, modes) ** 2).cumsum() ** 0.5
    if isinstance(modes, NMA):
        arange = np.arange(0.5, len(modes)+0.5)
    else:
        arange = modes.getIndices() + 0.5
    show = plt.plot(arange, cumov, *args, **kwargs)
    plt.title('Cumulative overlap with {0}'.format(str(mode)))
    plt.xlabel('{0} mode index'.format(modes))
    plt.ylabel('Cumulative overlap')
    plt.axis((arange[0]-0.5, arange[-1]+0.5, 0, 1))
    if SETTINGS['auto_show']:
        showFigure()
    return show
Example #7
0
    def _fetch_docs(cls, resource_name, query, options=None):
        '''
        Returns a list of documents in deserialized (hash) form.

        If model caching is used, then the cache will be checked
        for the document before going to the remote data endpoint.

        See toothpick/cache.py for more about caching.

        :param resource_name: name used to look up the backing resource
        :param query: query forwarded to the resource's read()
        :param options: optional read options
        :returns: a list of documents; empty list when nothing is found
        '''
        try:
            attributes = cls._resource(resource_name).read(
                query=query,
                options=options
            )

            # things like #find are expecting lists of documents, even if there's
            # only one, but that seems a little artifact-y.  I'm putting that here
            # as a constraint rather than forcing Resource implementors to worry
            # about it.
            if not isinstance(attributes, list):
                attributes = [attributes]

            return attributes
        except exceptions.NotFound:
            # Absence is not an error for callers; signal it with [].
            return []
Example #8
0
    def stepDone(self, result, step):
        """This method is called when the BuildStep completes. It is passed a
        status object from the BuildStep and is responsible for merging the
        Step's results into those of the overall Build.

        Returns True when the build should terminate (lost remote
        connection, halt-on-failure step, or an exception result)."""

        terminate = False
        text = None
        # Steps may return either a bare result code or a (result, text) pair.
        if type(result) == types.TupleType:
            result, text = result
        assert type(result) == type(SUCCESS)
        log.msg(" step '%s' complete: %s" % (step.name, Results[result]))
        self.results.append(result)
        if text:
            self.text.extend(text)
        # Without a remote connection the build cannot continue.
        if not self.remote:
            terminate = True
        if result == FAILURE:
            if step.warnOnFailure:
                # Only downgrade to WARNINGS if the build has not already failed.
                if self.result != FAILURE:
                    self.result = WARNINGS
            if step.flunkOnFailure:
                self.result = FAILURE
            if step.haltOnFailure:
                terminate = True
        elif result == WARNINGS:
            if step.warnOnWarnings:
                if self.result != FAILURE:
                    self.result = WARNINGS
            if step.flunkOnWarnings:
                self.result = FAILURE
        elif result == EXCEPTION:
            # An exception always fails the build and stops further steps.
            self.result = EXCEPTION
            terminate = True
        return terminate
Example #9
0
    def __init__(self, addr, zmq_type, bind=True, subscribe=None):
        """Create a zmq socket of *zmq_type* and bind or connect it to *addr*.

        :param addr: endpoint address passed to bind()/connect()
        :param zmq_type: zmq socket type constant (e.g. zmq.PULL, zmq.PUB)
        :param bind: bind to addr when True, otherwise connect
        :param subscribe: subscription filter(s) -- a list, a single str,
            or None (no subscriptions)
        :raises RPCException: when the socket cannot be opened
        """
        self.sock = _get_ctxt().socket(zmq_type)
        self.addr = addr
        self.type = zmq_type
        self.subscriptions = []

        # Support failures on sending/receiving on wrong socket type.
        self.can_recv = zmq_type in (zmq.PULL, zmq.SUB)
        self.can_send = zmq_type in (zmq.PUSH, zmq.PUB)
        self.can_sub = zmq_type in (zmq.SUB, )

        # Support list, str, & None for subscribe arg (cast to list)
        # Any other type raises KeyError here -- intentional fail-fast.
        do_sub = {
            list: subscribe,
            str: [subscribe],
            type(None): []
        }[type(subscribe)]

        for f in do_sub:
            self.subscribe(f)

        str_data = {'addr': addr, 'type': self.socket_s(),
                    'subscribe': subscribe, 'bind': bind}

        LOG.debug(_("Connecting to %(addr)s with %(type)s"), str_data)
        LOG.debug(_("-> Subscribed to %(subscribe)s"), str_data)
        LOG.debug(_("-> bind: %(bind)s"), str_data)

        try:
            if bind:
                self.sock.bind(addr)
            else:
                self.sock.connect(addr)
        except Exception:
            raise RPCException(_("Could not open socket."))
Example #10
0
 def RandomGraph(self, nodes, edges, maxweight = 100.0):
     """
     Generates a graph of random edges.
     
     @param nodes: list of nodes or number of nodes in the random graph
     @param edges: number of edges to generate in the random graph
     @type edges: integer
     @param maxweight: maximum weight of each edge. default = 100.0
     @type maxweight: float
     """
     import random
     nodes_size = 0
     if type(nodes) == int:
         # NOTE(review): the first row is range(nodes) rather than a zero row,
         # so 'adjacency' ends up with nodes+1 rows. Possibly a header row --
         # confirm against makeGraphFromAdjacency.
         adjacency = [range(nodes)]
         nodes_size = nodes
         for node in range(nodes):
             adjacency.append([0 for x in range(nodes)])
     elif type(nodes) == list:
         # NOTE(review): zero rows are appended to the caller's list in place;
         # confirm that mutating the 'nodes' argument is acceptable.
         adjacency = nodes
         nodes_size = len(nodes)
         for node in range(nodes_size):
             adjacency.append([0 for x in range(nodes_size)])
     else: raise FunctionParameterTypeError('nodes can only be a list \
             or integer')
     count = 0
     # NOTE(review): '<=' places edges+1 edges -- looks like an off-by-one.
     while count <= edges:
         # (row, col, weight); row is offset by +1, matching the extra first row.
         edge = (int(random.uniform(0, nodes_size)) + 1, 
                 int(random.uniform(0, nodes_size)),
                 int(random.uniform(0, 1) * maxweight))
         if adjacency[edge[0]][edge[1]] == 0:
             # Only count an edge when it fills a previously empty cell.
             adjacency[edge[0]][edge[1]] = edge[2]
             count = count + 1
     self.makeGraphFromAdjacency(adjacency)
Example #11
0
    def write_double_workaround(self, d):
        """
        Override the L{DataTypeMixIn.write_double} method to fix problems
        with doubles by using the third-party C{fpconst} library.

        @param d: the float value to write
        @raise TypeError: Unexpected type for float C{d}.
        """
        if type(d) is not float:
            raise TypeError('expected a float (got:%r)' % (type(d),))

        # NaN and the infinities are written as literal 8-byte patterns,
        # byte-reversed for little-endian streams.
        if python.isNaN(d):
            if self._is_big_endian():
                self.write('\xff\xf8\x00\x00\x00\x00\x00\x00')
            else:
                self.write('\x00\x00\x00\x00\x00\x00\xf8\xff')
        elif python.isNegInf(d):
            if self._is_big_endian():
                self.write('\xff\xf0\x00\x00\x00\x00\x00\x00')
            else:
                self.write('\x00\x00\x00\x00\x00\x00\xf0\xff')
        elif python.isPosInf(d):
            if self._is_big_endian():
                self.write('\x7f\xf0\x00\x00\x00\x00\x00\x00')
            else:
                self.write('\x00\x00\x00\x00\x00\x00\xf0\x7f')
        else:
            # Ordinary finite doubles fall through to the original writer.
            write_double_workaround.old_func(self, d)
Example #12
0
 def __delitem__(self, index):
     """Delete an entry, addressed either by position (int) or by name (str)."""
     index_type = type(index)
     if index_type is str:
         # Fetch the item through the mapping first -- removing it from the
         # list needs the item object, so this must precede the dict delete.
         item = self[index]
         self.list.remove(item)
         del self.dict[index]
     elif index_type is int:
         name = self.list[index].name
         del self.dict[name]
         del self.list[index]
Example #13
0
def GetProperties(obj, propList):
	"""Given a MAPI object and a list of properties, return a list of property values.
	
	Allows a single property to be passed, and the result is a single object.
	
	Each request property can be an integer or a string.  Of a string, it is 
	automatically converted to an integer via the GetIdsFromNames function.
	
	If the property fetch fails, the result is None.
	"""
	# Remember whether the caller passed a sequence, so the result shape
	# can mirror the input (list in -> list out, scalar in -> scalar out).
	bRetList = 1
	if type(propList) not in [TupleType, ListType]:
		bRetList = 0
		propList = (propList,)
	realPropList = []
	rc = []
	for prop in propList:
		if type(prop)!=IntType:	# Integer
			# Named property: resolve the string to a property ID first.
			props = ( (mapi.PS_PUBLIC_STRINGS, prop), )
			propIds = obj.GetIDsFromNames(props, 0)
			prop = mapitags.PROP_TAG( mapitags.PT_UNSPECIFIED, mapitags.PROP_ID(propIds[0]))
		realPropList.append(prop)
		
	# Single server round-trip for all properties.
	hr, data = obj.GetProps(realPropList,0)
	if hr != 0:
		# NOTE(review): 'data = None' is dead -- the function returns None
		# immediately afterwards.
		data = None
		return None
	if bRetList:
		return [v[1] for v in data]
	else:
		return data[0][1]
Example #14
0
    def write_24bit_int(self, n):
        """
        Writes a 24 bit integer to the stream.

        @since: 0.4
        @param n: 24 bit integer
        @type n: C{int}
        @raise TypeError: Unexpected type for int C{n}.
        @raise OverflowError: Not in range.
        """
        if type(n) not in python.int_types:
            raise TypeError('expected an int (got:%r)' % (type(n),))

        if not -8388608 <= n <= 8388607:
            raise OverflowError("n is out of range")

        # The stream's endianness decides which byte (shift amount) goes first.
        byte_shifts = [16, 8, 0] if self._is_big_endian() else [0, 8, 16]

        # Two's-complement encode negatives into the unsigned 24-bit range.
        if n < 0:
            n += 0x1000000

        for shift in byte_shifts:
            self.write_uchar((n >> shift) & 0xff)
    def test_build_composer_environment_converts_vars_to_str(self):
        """Every value in the environment built by ComposerCommandRunner must
        be a plain str, even when the source context holds non-string values
        (MY_DICTIONARY below is a dict)."""
        ctx = utils.FormattedDict({
            'BUILD_DIR': '/usr/awesome',
            'PHP_VM': 'php',
            'TMPDIR': 'tmp',
            'LIBDIR': 'lib',
            'CACHE_DIR': 'cache',
            'PHPRC': '/usr/awesome/phpini',
            'MY_DICTIONARY': {'KEY': 'VALUE'},
        })

        write_config_stub = Dingus()

        # Stub out config writing so constructing the extension does no I/O.
        with patches({
            'composer.extension.PHPComposerStrategy.write_config': write_config_stub
        }):
            self.extension_module.ComposerExtension(ctx)
            cr = self.extension_module.ComposerCommandRunner(ctx, None)

            built_environment = cr._build_composer_environment()

        for key, val in built_environment.iteritems():
            assert type(val) == str, \
                "Expected [%s]:[%s] to be type `str`, but found type [%s]" % (
                    key, val, type(val))
Example #16
0
def SetProperties( msg, propDict):
	""" Given a Python dictionary, set the objects properties.
	
	If the dictionary key is a string, then a property ID is queried
	otherwise the ID is assumed native.
	
	Coded for maximum efficiency wrt server calls - ie, maximum of
	2 calls made to the object, regardless of the dictionary contents
	(only 1 if dictionary full of int keys)
	"""

	newProps = []
	# First pass over the properties we should get IDs for.
	for key, val in propDict.iteritems():
		if type(key) in [str, unicode]:
			newProps.append((mapi.PS_PUBLIC_STRINGS, key))
	# Query for the new IDs
	if newProps: newIds = msg.GetIDsFromNames(newProps, mapi.MAPI_CREATE)
	newIdNo = 0
	newProps = []
	# Second pass: build (tag, value) pairs; string keys consume the resolved
	# IDs in order, and the tag type is chosen from the value's Python type.
	for key, val in propDict.iteritems():
		if type(key) in [str, unicode]:
			type_val=type(val)
			if type_val in [str, unicode]:
				tagType = mapitags.PT_UNICODE
			elif type_val==IntType:
				tagType = mapitags.PT_I4
			elif type_val==TimeType:
				tagType = mapitags.PT_SYSTIME
			else:
				raise ValueError("The type of object %s(%s) can not be written" % (repr(val),type_val))
			key = mapitags.PROP_TAG(tagType, mapitags.PROP_ID(newIds[newIdNo]))
			newIdNo = newIdNo + 1
		newProps.append( (key, val) )
	# Single server call to apply everything at once.
	msg.SetProps(newProps)
Example #17
0
    def __init__(self,
                 batch_size,
                 target_lfiles=None,
                 source_lfiles=None,
                 order=0,
                 dtype="int64",
                 use_infinite_loop=True,
                 stop=-1,
                 output_format=None,
                 can_fit=False,
                 shuffle=False):
        """Store iterator configuration and load the data files.

        :param batch_size: number of examples per batch
        :param target_lfiles: optional list of target-language files; when
            given, must be parallel (same length) to ``source_lfiles``
        :param source_lfiles: list of source-language files (required)
        :param order: ordering flag (forwarded unchanged to iteration logic)
        :param dtype: dtype name for produced arrays
        :param use_infinite_loop: restart iteration when data is exhausted
        :param stop: stop position; -1 presumably means no limit -- confirm
        :param output_format: optional output formatting hook
        :param can_fit: whether the data fits in memory
        :param shuffle: whether to shuffle the data
        """
        # Fixed: this message previously said "Target language file", which
        # made failures of the *source* check misleading.
        assert type(source_lfiles) == list, "Source language files should be a list."

        if target_lfiles is not None:
            assert type(target_lfiles) == list, "Target language files should be a list."
            # Source and target corpora must be parallel.
            assert len(target_lfiles) == len(source_lfiles)

        self.batch_size = batch_size
        self.target_lfiles = target_lfiles
        self.source_lfiles = source_lfiles
        self.use_infinite_loop = use_infinite_loop
        self.target_langs = []
        self.source_langs = []
        self.order = order
        self.offset = 0
        self.data_len = 0
        self.stop = stop
        self.can_fit = can_fit
        self.dtype = dtype
        self.output_format = output_format
        self.shuffle = shuffle
        self.load_files()
 def serialize(self, buff):
   """
   Serialize message fields into *buff*.

   :param buff: output buffer, ``StringIO``
   """
   try:
     # Each string field is a little-endian uint32 length prefix followed by
     # its UTF-8 bytes. On Python 3 the bytes are packed individually
     # ('<I%sB' with *_x), on Python 2 as a single string ('<I%ss').
     _x = self.caller_id
     length = len(_x)
     if python3 or type(_x) == unicode:
       _x = _x.encode('utf-8')
       length = len(_x)  # recompute: byte length may differ after encoding
     if python3:
       buff.write(struct.pack('<I%sB'%length, length, *_x))
     else:
       buff.write(struct.pack('<I%ss'%length, length, _x))
     _x = self.orig_caller_id
     length = len(_x)
     if python3 or type(_x) == unicode:
       _x = _x.encode('utf-8')
       length = len(_x)
     if python3:
       buff.write(struct.pack('<I%sB'%length, length, *_x))
     else:
       buff.write(struct.pack('<I%ss'%length, length, _x))
     _x = self.data
     length = len(_x)
     if python3 or type(_x) == unicode:
       _x = _x.encode('utf-8')
       length = len(_x)
     if python3:
       buff.write(struct.pack('<I%sB'%length, length, *_x))
     else:
       buff.write(struct.pack('<I%ss'%length, length, _x))
   # Delegate to _check_types so the caller gets field/type context.
   except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
   except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
	def delete(self,id):
		"""Delete the row whose key column (the table's first field) equals *id*.

		*id* may be a dict of named bind parameters or a plain integer.
		"""
		key_column = self.fields[0]
		if type(id) == dict:
			query = "DELETE FROM " + self.name + " WHERE " + key_column + " = %(" + key_column + ")s"
			self.cursor.execute(query, id)
		elif type(id) == int:
			query = "DELETE FROM " + self.name + " WHERE " + key_column + " = %s"
			self.cursor.execute(query, id)
Example #20
0
def QScomplex(val):
    """Build a complex number in the active arithmetic mode.

    In MODE_NORM returns a builtin ``complex``; otherwise builds an
    ``mpmath.mpc``, parsing strings of the forms 'a+bj', 'a-bj', 'bj', 'a'.
    """
    if QSMODE == MODE_NORM:
        return complex(val)
    else:
        if type(val) is str or type(val) is unicode:
            if 'nan' in val:
                return mpmath.mpc(real='nan',imag='nan')
            real = None
            imag = None
            delim = None
            # Look for the sign separating real and imaginary parts; start at
            # index 1 so a leading sign on the real part is not mistaken for it.
            if '+' in val[1:]:
                delim = '+'
            elif '-' in val[1:]:
                delim = '-'
            if delim is None:
                # Single component: imaginary if it carries a 'j', else real.
                if 'j' in val:
                    imag = val.replace('j','')
                else:
                    real = val
            else:
                # Split at the separator (offset by 1 for the skipped sign).
                index = val[1:].find(delim) + 1
                real = val[:index]
                imag = val[index:].replace('j','')
            return mpmath.mpc(real=real,imag=imag)
        else:
            return mpmath.mpc(val)
Example #21
0
	def unset(cls, client, resource, args) :
		""" Use this API to unset the properties of csaction resource.
		Properties that need to be unset are specified in args array.
		"""
		try :
			if type(resource) is not list :
				# Single resource: accept either a name string or a csaction object.
				unsetresource = csaction()
				if type(resource) !=  type(unsetresource):
					unsetresource.name = resource
				else :
					unsetresource.name = resource.name
				return unsetresource.unset_resource(client, args)
			else :
				# Bulk form: build one csaction per entry, from names or objects.
				# NOTE(review): an empty list reaches resource[0] and raises
				# IndexError before the emptiness checks below run.
				if type(resource[0]) != cls :
					if (resource and len(resource) > 0) :
						unsetresources = [ csaction() for _ in range(len(resource))]
						for i in range(len(resource)) :
							unsetresources[i].name = resource[i]
				else :
					if (resource and len(resource) > 0) :
						unsetresources = [ csaction() for _ in range(len(resource))]
						for i in range(len(resource)) :
							unsetresources[i].name = resource[i].name
				result = cls.unset_bulk_request(client, unsetresources, args)
			return result
		except Exception as e :
			raise e
	def delete(cls, client, resource) :
		""" Use this API to delete aaapreauthenticationpolicy.
		"""
		try :
			if type(resource) is not list :
				# Single resource: accept either a name string or a policy object.
				deleteresource = aaapreauthenticationpolicy()
				if type(resource) !=  type(deleteresource):
					deleteresource.name = resource
				else :
					deleteresource.name = resource.name
				return deleteresource.delete_resource(client)
			else :
				# Bulk form: build one policy object per entry, from names or objects.
				# NOTE(review): an empty list reaches resource[0] and raises
				# IndexError before the emptiness checks below run.
				if type(resource[0]) != cls :
					if (resource and len(resource) > 0) :
						deleteresources = [ aaapreauthenticationpolicy() for _ in range(len(resource))]
						for i in range(len(resource)) :
							deleteresources[i].name = resource[i]
				else :
					if (resource and len(resource) > 0) :
						deleteresources = [ aaapreauthenticationpolicy() for _ in range(len(resource))]
						for i in range(len(resource)) :
							deleteresources[i].name = resource[i].name
				result = cls.delete_bulk_request(client, deleteresources)
			return result
		except Exception as e :
			raise e
def load_data(name, plotdir, print_out=True):
    "Read data and split into train, test data."
    frame = read_data(name)
    train, test = train_test_split(frame, test_size=0.3)
    # Label columns: 'risk' (five-way class) and 'Y' (binary target).
    label_cols = ['risk', 'Y']
    train_y = train[label_cols]
    test_y = test[label_cols]
    train = train.drop(label_cols, axis=1)
    test = test.drop(label_cols, axis=1)
    if print_out:
        print("train test types %s %s %s %s" % (type(train), type(test), type(train_y), type(test_y)))
        print("train test shapes %s %s %s %s" % (train.shape, test.shape, train_y.shape, test_y.shape))
        print("train head\n%s" % (train[:3]))
        print("test head\n%s" % (test[:3]))
        print("train_y set %s, test_y set %s" % (set(train_y['Y']), set(test_y['Y'])))
        print("train_y stats\n%s\ntest_y stats\n%s" % (train_y.describe(), test_y.describe()))

    # (Earlier experiments -- scatter-matrix plotting into plotdir and dropping
    # high-variance / low-importance columns -- were disabled and are omitted.)
    return train, test, train_y, test_y
Example #24
0
def parseString2Pagenum(parent, string, nodialog=False):
    """ Parse a string with a list of pagenumbers to an integer list with
        page numbers.
        e.g. "1-3,5,7" --> [1,2,3,5,7]
        parent is the wx parent window for the error dialog.

        Raises ValueError on invalid input when nodialog is True (or when
        the dialog itself cannot be shown).
    """
    listFull = string.split(",")
    PageNumbers = list()
    # Build the message up front: it was previously created only inside the
    # nodialog=False branch, so the nodialog=True path raised NameError
    # instead of the intended ValueError.
    errstring = "Invalid syntax in page selection: "+string+\
                ". Please use a comma separated list with"+\
                " optional dashes, e.g. '1-3,6,8'."
    try:
        for item in listFull:
            # "a-b" expands to the inclusive range a..b; a single number
            # yields pagerange[0] == pagerange[-1].
            pagerange = item.split("-")
            start = pagerange[0].strip()
            start = int(filter(type(start).isdigit, start))
            end = pagerange[-1].strip()
            end = int(filter(type(end).isdigit, end))
            for i in np.arange(end-start+1)+start:
                PageNumbers.append(i)
        PageNumbers.sort()
        return PageNumbers
    except:
        if nodialog is False:
            try:
                wx.MessageDialog(parent, errstring, "Error", 
                                  style=wx.ICON_ERROR|wx.OK|wx.STAY_ON_TOP)
            except:
                raise ValueError(errstring)
        else:
            raise ValueError(errstring)
        return None
Example #25
0
 def line(self):
     """Lay out the items accumulated for the current line, move the placed
     widgets into self.widgets, and advance the cursor to the next line."""
     x1 = self.getleft()
     x2 = self.getright()
     align = self.align
     y = self.y
     
     # Drop a trailing spacer (tuples appear to be (width,) spacers,
     # everything else is a widget with a .rect).
     if len(self.items) != 0 and type(self.items[-1]) == tuple:
         del self.items[-1]
     
     # Total width of spacers plus widgets on this line.
     w = 0
     for e in self.items:
         if type(e) == tuple: w += e[0]
         else: w += e.rect.w
         
     # Starting x by alignment: -1 left, 0 centered, 1 right.
     if align == -1: x = x1
     elif align == 0: 
         x = x1 + ((x2-x1)-w)/2
         self.fit = 0
     elif align == 1: x = x2 - w
         
     # Place each widget at the running x; spacers just advance x.
     for e in self.items:
         if type(e) == tuple: x += e[0]
         else:
             e.rect.x,e.rect.y = x,y
             self.widgets.append(e)
             x += e.rect.w
     
     # Reset per-line state and move the cursor down by the line height.
     self.items = []
     self.y = self.y + self.h
     self.x = self.getleft()
     self.h = 0
Example #26
0
    def __init__(self, name, args=(), opts=(), **kwargs):
        """
        Base class for POV objects.

        @param name: POV object name
        @param args: compulsory (comma separated?) pov args XX commas don't seem to matter?
        @param opts: eg. CSG items
        @param kwargs: key value pairs
        """
        #print "Item",name,args,opts,kwargs
        self.name = name

        # Defaults were mutable lists ([]); immutable tuples avoid the shared
        # mutable-default pitfall and behave identically here, since the
        # values are only iterated and copied.
        self.args = flatten([map_arg(a) for a in args])
        self.opts = flatten([map_arg(o) for o in opts])

        self.kwargs = dict(kwargs)  # take a copy
        for key, val in self.kwargs.items():
            # Sequence-valued keywords get the same map_arg treatment.
            if type(val) in (tuple, list):
                self.kwargs[key] = map_arg(val)
Example #27
0
def writedoc(key,top=False):
    """Write HTML documentation to a file in the current directory."""
    # NOTE(review): output goes to stdout via print, not to a file --
    # the docstring may be stale; confirm against callers.
    # The special keys "modules" and "/." render an index of all modules.
    if(type(key) == str and (key == "modules" or key == "/.")):
        heading = pydoc.html.heading(
            '<br><big><big><strong>&nbsp;'
            'Python: Index of Modules'
            '</strong></big></big>',
            '#ffffff', '#7799ee')
        builtins = []
        for name in sys.builtin_module_names:
            builtins.append('<a href="%s">%s</a>' % (cgi.escape(name,quote=True), cgi.escape(name)))
        indices = ['<p>Built-in modules: ' + cgi.escape(join(builtins, ', '))]
        seen = {}
        for dir in pydoc.pathdirs():
            indices.append(pydoc.html.index(dir, seen))
        print cleanlinks(heading + join(indices))
        return

    # Non-module keys are resolved to an object via pydoc.locate.
    if(type(key) != types.ModuleType):
        object = pydoc.locate(key)
        if(object == None and top):
            print "could not locate module/object for key " + \
                   cgi.escape(key) + "<br><a href=\"pydoc:modules\">go to index</a>";
    else:
        object = key
            
    if object:
        print cleanlinks(pydoc.html.page(pydoc.describe(object), pydoc.html.document(object)))
Example #28
0
    def check_result(self, expected, result, table, test_name):
        """Assert every expected key/value appears in *result*, comparing both
        sides as unicode, and emit a detailed diagnostic on mismatch."""
        # TODO: don't convert to unicode, type inference for smallint is
        # currently broken, that's the reason at the moment.
        assert result, "No result set received."

        for key, value in expected.iteritems():
          self.assertEquals(unicode(result[key]), unicode(value), """
             ========================================================

             Data: {expected}

             ========================================================

             Failed {test_name} test on
             Table: "{table}"
             Field: "{key}"
               expected: {expected_value} ({expected_type})
               result: {result_value} ({result_type})

             ========================================================
            """.format(
                  expected = repr(expected),
                  test_name = test_name,
                  table = table,
                  key = key,
                  expected_value = value,
                  result_value = result[key],
                  expected_type = type(value),
                  result_type = type(result[key])
              ))
Example #29
0
    def test_listings_and_commands_and_output(self):
        """End-to-end chapter test: parse the book's listings, then replay
        each one against the source tree, checking commands and output."""
        self.parse_listings()

        # sanity checks
        self.assertEqual(self.listings[0].type, 'code listing with git ref')
        self.assertEqual(type(self.listings[1]), Command)
        self.assertEqual(type(self.listings[2]), Output)

        self.sourcetree.start_with_checkout(self.chapter_no)
        # other prep
        self.sourcetree.run_command('python3 manage.py syncdb --noinput')

        # skips
        self.skip_with_check(25, 'the -b means ignore whitespace')
        self.skip_with_check(28, 'leave static, for now')
        self.skip_with_check(51, 'will now show all the bootstrap')

        # hack fast-forward
        # (flip 'skip' to True locally to jump ahead while debugging)
        skip = False
        if skip:
            self.pos = 55
            self.sourcetree.run_command('git checkout {0}'.format(
                self.sourcetree.get_commit_spec('ch07l018')
            ))

        while self.pos < len(self.listings):
            print(self.pos)
            self.recognise_listing_and_process_it()


        self.assert_all_listings_checked(self.listings)
        self.check_final_diff(self.chapter_no, ignore_moves=True)
Example #30
0
def convert_ops(ops, s, start, idxmap):
    """Locate the source-text occurrences of the AST operators *ops*.

    Each operator type is mapped to its surface symbol via ``ops_map``; the
    symbols are then matched, in order, scanning *s* forward from *start*.
    Returns a list of ``Name`` nodes annotated with ``node_start``,
    ``node_end``, ``lineno`` and ``col_offset`` (the latter two derived from
    *idxmap* by ``map_line_col``).
    """
    # Resolve each operator to its textual symbol; unknown operators are
    # reported but simply dropped from the match sequence.
    symbols = []
    for operator in ops:
        try:
            symbols.append(ops_map[type(operator)])
        except KeyError:
            print("[WARNING] operator %s is missing from ops_map, "
                  "please report the bug on GitHub" % operator)

    matches = []
    pos = start
    next_sym = 0  # index of the next symbol we are looking for
    while pos < len(s) and next_sym < len(symbols):
        symbol = symbols[next_sym]
        if s.startswith(symbol, pos):
            node = Name(symbol, None)
            node.node_start = pos
            node.node_end = pos + len(symbol)
            node.lineno, node.col_offset = map_line_col(idxmap, pos)
            matches.append(node)
            next_sym += 1
            pos = node.node_end
        else:
            pos += 1
    return matches
Example #31
0
 def __eq__(self, other):
     """Equal when *other* is a compatible type and every neurite tree
     type (axon, basal/apical dendrite, undefined) compares equal."""
     if not isinstance(self, type(other)):
         return False
     tree_types = (TreeType.axon, TreeType.basal_dendrite,
                   TreeType.apical_dendrite, TreeType.undefined)
     return all(self._compare_neurites(other, ttype) for ttype in tree_types)
Example #32
0
    def __init__(self, model, data, session=None, learning_phase_flags=None):
        """An explainer object for a deep model using a given background dataset.

        Note that the complexity of the method scales linearly with the number of background data
        samples. Passing the entire training dataset as `data` will give very accurate expected
        values, but be unreasonably expensive. The variance of the expectation estimates scale by
        roughly 1/sqrt(N) for N background data samples. So 100 samples will give a good estimate,
        and 1000 samples a very good estimate of the expected values.

        Parameters
        ----------
        model : tf.keras.Model or (input : [tf.Operation], output : tf.Operation)
            A keras model object or a pair of TensorFlow operations (or a list and an op) that
            specifies the input and output of the model to be explained. Note that SHAP values
            are specific to a single output value, so you get an explanation for each element of
            the output tensor (which must be a flat rank one vector).

        data : [numpy.array] or [pandas.DataFrame] or function
            The background dataset to use for integrating out features. DeepExplainer integrates
            over all these samples for each explanation. The data passed here must match the input
            operations given to the model. If a function is supplied, it must be a function that
            takes a particular input example and generates the background dataset for that example
        session : None or tensorflow.Session
            The TensorFlow session that has the model we are explaining. If None is passed then
            we do our best to find the right session, first looking for a keras session, then
            falling back to the default TensorFlow session.

        learning_phase_flags : None or list of tensors
            If you have your own custom learning phase flags pass them here. When explaining a prediction
            we need to ensure we are not in training mode, since this changes the behavior of ops like
            batch norm or dropout. If None is passed then we look for tensors in the graph that look like
            learning phase flags (this works for Keras models). Note that we assume all the flags should
            have a value of False during predictions (and hence explanations).

        """
        # try and import keras and tensorflow
        # (lazy import via module-level globals so that merely importing this
        # module does not require TensorFlow to be installed)
        global tf, tf_ops, tf_backprop, tf_execute, tf_gradients_impl
        if tf is None:
            from tensorflow.python.framework import (
                ops as tf_ops,
            )  # pylint: disable=E0611
            from tensorflow.python.ops import (
                gradients_impl as tf_gradients_impl,
            )  # pylint: disable=E0611
            from tensorflow.python.eager import backprop as tf_backprop
            from tensorflow.python.eager import execute as tf_execute

            # newer TF releases moved _IsBackpropagatable into gradients_util;
            # rebind tf_gradients_impl so later code finds it either way
            if not hasattr(tf_gradients_impl, "_IsBackpropagatable"):
                from tensorflow.python.ops import gradients_util as tf_gradients_impl
            import tensorflow as tf

            if LooseVersion(tf.__version__) < LooseVersion("1.4.0"):
                warnings.warn(
                    "Your TensorFlow version is older than 1.4.0 and not supported."
                )
        # standalone keras is only imported to emit a deprecation warning;
        # failure to import is deliberately ignored
        global keras
        if keras is None:
            try:
                import keras

                warnings.warn(
                    "keras is no longer supported, please use tf.keras instead."
                )
            except:
                pass

        # determine the model inputs and outputs
        self.model_inputs = _get_model_inputs(model)
        self.model_output = _get_model_output(model)
        assert (
            type(self.model_output) != list
        ), "The model output to be explained must be a single tensor!"
        assert (
            len(self.model_output.shape) < 3
        ), "The model output must be a vector or a single value!"
        # rank-1 output == a single scalar per sample; rank-2 == one value
        # per output column, i.e. multi-output
        self.multi_output = True
        if len(self.model_output.shape) == 1:
            self.multi_output = False

        # in eager mode we need a callable Model object, so wrap an
        # (inputs, outputs) pair in tf.keras.Model if that is what was given
        if tf.executing_eagerly():
            if type(model) is tuple or type(model) is list:
                assert (
                    len(model) == 2
                ), "When a tuple is passed it must be of the form (inputs, outputs)"
                from tensorflow.keras import Model

                self.model = Model(model[0], model[1])
            else:
                self.model = model

        # check if we have multiple inputs
        # (inputs and data are normalized to lists so later code can index them)
        self.multi_input = True
        if type(self.model_inputs) != list or len(self.model_inputs) == 1:
            self.multi_input = False
            if type(self.model_inputs) != list:
                self.model_inputs = [self.model_inputs]
        if type(data) != list and (hasattr(data, "__call__") == False):
            data = [data]
        self.data = data

        self._vinputs = {}  # used to track what op inputs depends on the model inputs
        self.orig_grads = {}

        if not tf.executing_eagerly():
            self.session = _get_session(session)

        self.graph = _get_graph(self)

        # if no learning phase flags were given we go looking for them
        # ...this will catch the one that keras uses
        # we need to find them since we want to make sure learning phase flags are set to False
        if self.graph is not None:
            if learning_phase_flags is None:
                self.learning_phase_ops = []
                for op in self.graph.get_operations():
                    # heuristic: a scalar boolean Const op named *learning_phase*
                    if (
                        "learning_phase" in op.name
                        and op.type == "Const"
                        and len(op.outputs[0].shape) == 0
                    ):
                        if op.outputs[0].dtype == tf.bool:
                            self.learning_phase_ops.append(op)
                self.learning_phase_flags = [
                    op.outputs[0] for op in self.learning_phase_ops
                ]
            else:
                # NOTE(review): this branch never assigns self.learning_phase_flags,
                # unlike the others — confirm whether the passed-in list should be
                # stored here as well
                self.learning_phase_ops = [t.op for t in learning_phase_flags]
        else:
            self.learning_phase_ops = self.learning_phase_flags = []

        # save the expected output of the model
        # if self.data is a function, set self.expected_value to None
        if hasattr(self.data, "__call__"):
            self.expected_value = None
        else:
            if self.data[0].shape[0] > 5000:
                warnings.warn(
                    "You have provided over 5k background samples! "
                    "For better performance consider using smaller random sample."
                )
            # expected value = mean model output over the background dataset
            if not tf.executing_eagerly():
                self.expected_value = self.run(
                    self.model_output, self.model_inputs, self.data
                ).mean(0)
            else:
                self.expected_value = tf.reduce_mean(self.model(self.data), 0)

        if not tf.executing_eagerly():
            self._init_between_tensors(
                getattr(self.model_output, "op", self.model_output), self.model_inputs
            )
        else:
            self.between_tensors = []
        # make a blank array that will get lazily filled in with the SHAP value computation
        # graphs for each output. Lazy is important since if there are 1000 outputs and we
        # only explain the top 5 it would be a waste to build graphs for the other 995
        if not self.multi_output:
            self.phi_symbolics = [None]
        else:
            noutputs = self.model_output.shape.as_list()[1]
            if noutputs is not None:
                self.phi_symbolics = [None for i in range(noutputs)]
            else:
                raise Exception(
                    "The model output tensor to be explained cannot have a static shape in dim 1 of None!"
                )
Example #33
0
# Train the classifier
df_clf.fit(x_train, y_train)

# Predict the test set with the fitted DecisionTreeClassifier
pred = df_clf.predict(x_test)

# Check prediction accuracy
from sklearn.metrics import accuracy_score
print('예측 정확도: {0:.4f}'.format(accuracy_score(y_test, pred)))
print('예측 정확도:', round(accuracy_score(y_test, pred), 4))
print()

# Inspect the iris Bunch object returned by load_iris()
iris_data = load_iris()
print(type(iris_data))
print()

# Keys of the iris dataset.
# BUG FIX: the original called iris.keys(), but no variable named `iris`
# exists in this script -- the Bunch loaded above is named `iris_data`.
keys = iris_data.keys()
print('붓꽃 데이터 세트의 키들:', keys)
print()

# Print the values referenced by the Bunch keys
print('feature_names의 type:', type(iris_data.feature_names))
print('feature_names의 shape:', len(iris_data.feature_names))
print(iris_data.feature_names)
print()

print('target_names의 type:', type(iris_data.target_names))
print('target_names의 shape:', iris_data.target_names.shape)
def create_instrumented_model(args, **kwargs):
    '''
    Creates an instrumented model out of a namespace of arguments that
    correspond to ArgumentParser command-line args:
      model: a string to evaluate as a constructor for the model.
      pthfile: (optional) filename of .pth file for the model.
      layers: a list of layers to instrument, defaulted if not provided.
      edit: True to instrument the layers for editing.
      gen: True for a generator model.  One-pixel input assumed.
      imgsize: For non-generator models, (y, x) dimensions for RGB input.
      cuda: True to use CUDA.
  
    The constructed model will be decorated with the following attributes:
      input_shape: (usually 4d) tensor shape for single-image input.
      output_shape: 4d tensor shape for output.
      feature_shape: map of layer names to 4d tensor shape for featuremaps.
      retained: map of layernames to tensors, filled after every evaluation.
      ablation: if editing, map of layernames to [0..1] alpha values to fill.
      replacement: if editing, map of layernames to values to fill.

    When editing, the feature value x will be replaced by:
        `x = (replacement * ablation) + (x * (1 - ablation))`
    '''

    # Fold keyword overrides into a single attribute-accessible namespace.
    args = EasyDict(vars(args), **kwargs)

    # Construct the network
    if args.model is None:
        pbar.print('No model specified')
        return None
    if isinstance(args.model, torch.nn.Module):
        model = args.model
    else:
        # args.model is a constructor expression string to be evaluated
        model = autoimport_eval(args.model)
    # Unwrap any DataParallel-wrapped model
    if isinstance(model, torch.nn.DataParallel):
        model = next(model.children())

    # Load its state dict
    meta = {}
    if getattr(args, 'pthfile', None) is not None:
        data = torch.load(args.pthfile)
        modelkey = getattr(args, 'modelkey', 'state_dict')
        if modelkey in data:
            # The checkpoint is a wrapper dict: keep its scalar entries as
            # metadata and descend into the actual state dict.
            for key in data:
                if isinstance(data[key], numbers.Number):
                    meta[key] = data[key]
            data = data[modelkey]
        submodule = getattr(args, 'submodule', None)
        if submodule is not None and len(submodule):
            # Restrict the state dict to the named submodule's parameters.
            remove_prefix = submodule + '.'
            data = {
                k[len(remove_prefix):]: v
                for k, v in data.items() if k.startswith(remove_prefix)
            }
            if not len(data):
                pbar.print('No submodule %s found in %s' %
                           (submodule, args.pthfile))
                return None
        model.load_state_dict(data,
                              strict=not getattr(args, 'unstrict', False))

    # Decide which layers to instrument.
    if getattr(args, 'layer', None) is not None:
        args.layers = [args.layer]
    # If the layer '?' is the only specified, just print out all layers.
    # NOTE(review): layers entries appear to be (layer, unit) tuples here,
    # hence the ('?', '?') comparison -- confirm against callers.
    if getattr(args, 'layers', None) is not None:
        if len(args.layers) == 1 and args.layers[0] == ('?', '?'):
            for name, layer in model.named_modules():
                pbar.print(name)
            import sys
            sys.exit(0)
    if getattr(args, 'layers', None) is None:
        # Skip wrappers with only one named model
        container = model
        prefix = ''
        while len(list(container.named_children())) == 1:
            name, container = next(container.named_children())
            prefix += name + '.'
        # Default to all nontrivial top-level layers except last.
        args.layers = [
            prefix + name for name, module in container.named_children()
            if type(module).__module__ not in [
                # Skip ReLU and other activations.
                'torch.nn.modules.activation',
                # Skip pooling layers.
                'torch.nn.modules.pooling'
            ]
        ][:-1]
        pbar.print('Defaulting to layers: %s' % ' '.join(args.layers))

    # Now wrap the model for instrumentation.
    model = InstrumentedModel(model)
    model.meta = meta

    # Instrument the layers.
    model.retain_layers(args.layers)
    model.eval()
    # FIX: treat `cuda` like every other optional arg (the original accessed
    # args.cuda directly and raised when the attribute was absent).
    if getattr(args, 'cuda', False):
        model.cuda()

    # Annotate input, output, and feature shapes
    annotate_model_shapes(model,
                          gen=getattr(args, 'gen', False),
                          imgsize=getattr(args, 'imgsize', None))
    return model
Example #35
0
 def __init__(self, string: FormattedString):
     """Cache the plain-text, terminal and HTML renderings of *string*.

     Message itself is abstract: constructing it directly raises
     NotImplementedError; only subclasses may be instantiated.
     """
     if type(self) is Message:
         raise NotImplementedError
     # Evaluated in the same left-to-right order as the original tuple form.
     self.text = string.get_text()
     self.term = string.get_term()
     self.html = string.get_html()
Example #36
0
class OldStyle:
    """Demo class declared without an explicit ``object`` base.

    Under Python 2 this makes it an old-style (classic) class; the point of
    this module is to contrast it with ``NewStyle`` below.
    """

    def __init__(self, name, description):
        # Store both attributes in a single parallel assignment.
        self.name, self.description = name, description


class NewStyle(object):
    """Demo class that explicitly inherits from ``object``.

    Under Python 2 this makes it a new-style class, unlike ``OldStyle``.
    """

    def __init__(self, name, description):
        # Store both attributes in a single parallel assignment.
        self.name, self.description = name, description


# NOTE: Python 2 script -- the bare `print` statements below are Py2 syntax.
# It demonstrates the observable differences between old-style and
# new-style class instances (repr and dir output).
if __name__ == '__main__':
    old = OldStyle('old', 'old style class')
    print old
    print type(old)
    print dir(old)

    new = NewStyle('style', 'new style class')
    print new
    print type(new)
    print dir(new)
Example #37
0
# Grading harness: run the student's q1.printhelloworld under pythia,
# capturing all output into the grading work directory.
sys.stdout = open('/tmp/work/output/stdout', 'w')
sys.stderr = open('/tmp/work/output/stderr', 'w')

# Try to import student's code
sys.path.append('/tmp/work')
try:
    import q1
except Exception as e:
    # Import errors are the student's problem: report and exit cleanly.
    print(e, file=sys.stderr)
    sys.exit(0)

# Execute student's code
try:
    # Find the required function among q1's declared functions.
    find = [f for (n, f) in inspect.getmembers(q1, inspect.isfunction) if n == 'printhelloworld']
    if len(find) != 1:
        raise pythia.UndeclaredException('printhelloworld')
    if not callable(find[0]):
        raise pythia.BadTypeException('printhelloworld', type(find[0]), 'function')
    # FIX: inspect.getargspec was deprecated since 3.0 and removed in
    # Python 3.11; getfullargspec is the drop-in replacement and its
    # result still exposes the .args list used below.
    spec = inspect.getfullargspec(find[0])
    if len(spec.args) != 0:
        raise pythia.WrongParameterNumberException('printhelloworld', len(spec.args), 0)
    q1.printhelloworld()
except pythia.UndeclaredException as e:
    print('exception:undeclared:{}'.format(e.name), file=sys.stderr)
except pythia.BadTypeException as e:
    print('exception:badtype:{}:{}:{}'.format(e.name, e.actualtype, e.expectedtype), file=sys.stderr)
except pythia.WrongParameterNumberException as e:
    print('exception:wrongparameterexception:{}:{}:{}'.format(e.name, e.actualnumber, e.expectednumber), file=sys.stderr)
except Exception as e:
    print('exception:{}'.format(e), file=sys.stderr)
    def make_value(source_instance, **kwargs):
        """Build the value to insert at the "mapTo" target location.

        *kwargs* is the "value" schema; recognised keywords are 'type'
        (mandatory), 'const', 'source', 'separator' and 'converter'.
        Raises SyntaxError for schema errors and ValueError when a cast or
        conversion fails.  Returns None when no value can be produced.
        NOTE: uses `unicode`/`basestring`, i.e. this module targets Python 2.
        """
        def cast(val):
            # Map the JSON type name onto the corresponding Python type.
            try:
                pytype = {
                    'string': unicode,
                    'integer': int,
                    'number': float,
                    'boolean': bool,
                    'array': list,
                    'object': dict,
                }[target_type]
            except KeyError:
                raise SyntaxError(_("Unsupported type {}".format(target_type)))
            try:
                return pytype(val)
            except (ValueError, TypeError):
                raise ValueError(
                    _("Unable to cast {} to {}".format(val, pytype)))

        target_type = kwargs.get('type')
        if not target_type:
            raise SyntaxError(
                _("A 'type' must be specified for the target value"))

        if target_type == 'null':
            return None

        # A constant value short-circuits everything else.
        const = kwargs.pop('const', None)
        if const is not None:
            return cast(const)

        source_prop = kwargs.pop('source', None)
        if source_prop is not None:
            if type(source_instance) is not dict:
                raise SyntaxError(
                    _("The 'source' keyword can only be used with object instances"
                      ))
            # A single property name: recurse on that property's value.
            if isinstance(source_prop, basestring):
                if source_prop in source_instance:
                    return make_value(source_instance[source_prop], **kwargs)
                return None
            # A list of property names: join their values into one string.
            if type(source_prop) is list:
                if target_type != 'string':
                    raise SyntaxError(
                        _("The 'source' keyword can only specify a list of properties if the target type is 'string'"
                          ))
                separator = kwargs.pop('separator', None)
                if not isinstance(separator, basestring):
                    raise SyntaxError(
                        _("A 'separator' must be specified if 'source' is a list of properties"
                          ))
                result = []
                for source_prop_i in source_prop:
                    if source_prop_i in source_instance:
                        value = make_value(source_instance[source_prop_i],
                                           **kwargs)
                        if value:
                            result += [value]
                return separator.join(result)
            raise SyntaxError(_("Invalid value for 'source' keyword"))

        converter = kwargs.pop('converter', None)
        if converter is not None:
            # A dict converter is a simple vocabulary mapping (string only),
            # falling back to the source value itself when no entry matches.
            if type(converter) is dict:
                if target_type != 'string':
                    raise SyntaxError(
                        _("A dictionary converter can only be used for a target type of 'string'"
                          ))
                return converter.get(unicode(source_instance),
                                     unicode(source_instance))
            # A string converter names a registered conversion function.
            if isinstance(converter, basestring):
                if converter not in validator.converters:
                    raise SyntaxError(
                        _("Converter '{}' not found".format(converter)))
                converter_fn = validator.converters[converter]
                try:
                    return cast(converter_fn(source_instance))
                # FIX: "except Exception, e" is Python-2-only syntax; the
                # "as" form below is valid on Python 2.6+ and Python 3.
                except Exception as e:
                    raise ValueError(
                        _("Unable to convert value using '{}' function: {}".
                          format(converter, e)))
            raise SyntaxError(_("Invalid value for 'converter' keyword"))
        value_schema = map_params.get('value')
        errors = False

        if not target_path:
            errors = True
            yield jsonschema.ValidationError(
                _("A 'target' location must be defined"))
        else:
            try:
                jsonpointer.JsonPointer(target_path)
            except (TypeError, jsonpointer.JsonPointerException):
                errors = True
                yield jsonschema.ValidationError(
                    _("'target': invalid JSON pointer"))

        if type(value_schema) is not dict:
            errors = True
            yield jsonschema.ValidationError(
                _("A 'value' schema dictionary must be defined"))

        if errors:
            return

        try:
            value = make_value(instance, **value_schema)
            if value is not None:
                if target_path.endswith('/-') and type(value) is list:
                    # merge an array onto an array element
                    operations = [{
                        'op': 'add',
                        'path': target_path,
Example #40
0
    def shap_values(
        self, X, ranked_outputs=None, output_rank_order="max", check_additivity=True
    ):
        """Estimate SHAP values for the samples in X.

        Parameters
        ----------
        X : array or list of arrays
            Input samples; a list is required when the model has multiple
            inputs, a single array is accepted otherwise.
        ranked_outputs : None or int
            For multi-output models, explain only the top `ranked_outputs`
            outputs per sample (ranked per `output_rank_order`); when given,
            the ranks are returned alongside the attributions.
        output_rank_order : "max", "min" or "max_abs"
            How to rank the model outputs when `ranked_outputs` is used.
        check_additivity : bool
            When True, assert that attributions plus the expected value sum
            to the model output (within 1e-2).
        """
        # check if we have multiple inputs
        # (normalize X to a list so the code below can index inputs uniformly)
        if not self.multi_input:
            if type(X) == list and len(X) != 1:
                assert False, "Expected a single tensor as model input!"
            elif type(X) != list:
                X = [X]
        else:
            assert type(X) == list, "Expected a list of model inputs!"
        assert len(self.model_inputs) == len(
            X
        ), "Number of model inputs (%d) does not match the number given (%d)!" % (
            len(self.model_inputs),
            len(X),
        )

        # rank and determine the model outputs that we will explain
        if ranked_outputs is not None and self.multi_output:
            if not tf.executing_eagerly():
                model_output_values = self.run(self.model_output, self.model_inputs, X)
            else:
                model_output_values = self.model(X)

            if output_rank_order == "max":
                model_output_ranks = np.argsort(-model_output_values)
            elif output_rank_order == "min":
                model_output_ranks = np.argsort(model_output_values)
            elif output_rank_order == "max_abs":
                model_output_ranks = np.argsort(np.abs(model_output_values))
            else:
                assert False, "output_rank_order must be max, min, or max_abs!"
            model_output_ranks = model_output_ranks[:, :ranked_outputs]
        else:
            # no ranking requested: explain every output for every sample
            model_output_ranks = np.tile(
                np.arange(len(self.phi_symbolics)), (X[0].shape[0], 1)
            )

        # compute the attributions
        output_phis = []
        for output_index in range(model_output_ranks.shape[1]):
            phis = []
            # iterate over input tensors and prefill with zeroes
            for input_index in range(len(X)):
                phis.append(np.zeros(X[input_index].shape))

            # samples are explained one at a time against the background set
            for sample_index in range(X[0].shape[0]):
                if hasattr(self.data, "__call__"):
                    # background is generated per-sample by the user function
                    bg_data = self.data([X[inp_idx][sample_index] for inp_idx in range(len(X))])
                    if type(bg_data) != list:
                        bg_data = [bg_data]
                else:
                    bg_data = self.data

                # tile the inputs to line up with the background data samples
                tiled_x = [
                    np.broadcast_to(
                        X[input_idx][sample_index],
                        shape=bg_data[input_idx].shape
                    )
                    for input_idx in range(len(X))
                ]

                # we use the first sample for the current sample and the rest for the references
                joint_input = [
                    np.concatenate([tiled_x[l], bg_data[l]], 0) for l in range(len(X))
                ]

                # run attribution computation graph
                feature_ind = model_output_ranks[sample_index, output_index]
                sample_phis = self.run(
                    self.phi_symbolic(feature_ind), self.model_inputs, joint_input
                )

                # assign the attributions to the right part of the output arrays
                # (the second half of sample_phis corresponds to the background rows)
                for inp_idx in range(len(X)):
                    phis[inp_idx][sample_index] = (
                        sample_phis[inp_idx][bg_data[inp_idx].shape[0] :]
                        * (X[inp_idx][sample_index] - bg_data[inp_idx])
                    ).mean(0)

            output_phis.append(phis[0] if not self.multi_input else phis)

        # check that the SHAP values sum up to the model output
        if check_additivity:
            if not tf.executing_eagerly():
                model_output = self.run(self.model_output, self.model_inputs, X)
            else:
                model_output = self.model(X)
            # NOTE(review): inp_idx here iterates over model OUTPUT columns
            # (len(self.expected_value)), not inputs -- the name is misleading
            for inp_idx in range(len(self.expected_value)):
                if not self.multi_input:
                    diffs = (
                        model_output[:, inp_idx]
                        - self.expected_value[inp_idx]
                        - output_phis[inp_idx].sum(axis=tuple(range(1, output_phis[inp_idx].ndim)))
                    )
                else:
                    diffs = model_output[:, inp_idx] - self.expected_value[inp_idx]
                    for output_index in range(len(output_phis[inp_idx])):
                        diffs -= output_phis[inp_idx][output_index].sum(
                            axis=tuple(range(1, output_phis[inp_idx][output_index].ndim))
                        )
                assert np.abs(diffs).max() < 1e-2, (
                    "The SHAP explanations do not sum up to the model's output! This is either because of a "
                    "rounding error or because an operator in your computation graph was not fully supported. If "
                    "the sum difference of %f is significant compared the scale of your model outputs please post "
                    "as a github issue, with a reproducable example if possible so we can debug it."
                    % np.abs(diffs).max()
                )

        if not self.multi_output:
            return output_phis[0]
        elif ranked_outputs is not None:
            return output_phis, model_output_ranks
        else:
            return output_phis
Example #41
0
    def __init__(
        self,
        *,
        host: str = "aiplatform.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        credentials_file: str = None,
        scopes: Sequence[str] = None,
        channel: grpc.Channel = None,
        api_mtls_endpoint: str = None,
        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
        ssl_channel_credentials: grpc.ChannelCredentials = None,
        client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument is
                ignored if ``channel`` is provided.
            channel (Optional[grpc.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for grpc channel. It is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure mutual TLS channel. It is
                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.

        Raises:
          google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
              creation failed for any reason.
          google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
              and ``credentials_file`` are passed.
        """
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        # Per-RPC callable stubs, created lazily as methods are first used.
        self._stubs: Dict[str, Callable] = {}
        self._operations_client = None

        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)

        if channel:
            # Ignore credentials if a channel was passed.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None

        else:
            # No channel given: resolve the SSL credentials we will dial with.
            if api_mtls_endpoint:
                host = api_mtls_endpoint

                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials

            else:
                # client_cert_source_for_mtls is ignored when explicit SSL
                # channel credentials were already supplied.
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )

        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )

        # Dial our own channel using the credentials resolved by the base
        # transport and the SSL credentials determined above.
        if not self._grpc_channel:
            self._grpc_channel = type(self).create_channel(
                self._host,
                credentials=self._credentials,
                credentials_file=credentials_file,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                options=[
                    # Unlimited message sizes in both directions.
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )

        # Wrap messages. This must be done after self._grpc_channel exists
        self._prep_wrapped_messages(client_info)
def map_to_validator(validator, map_params, instance, schema):
    """
    "mapTo" keyword validator: provides flexible copying of data from the instance being
    validated to a different location in the document, where the target may be differently
    structured and have different semantics.

    "mapTo" is a dict with the following structure:
    ::
        {
            "target": JSON pointer to the target location
            "value": schema for the value to be inserted at the target location
        }

    The "value" schema has the following structure:
    ::
        {
            "type": (mandatory)
                - the JSON type of the target value ("string", "object", etc)
            "const": (optional)
                - a constant value to be set at the target location
                - if specified, the remaining parameters have no effect
            "source": (optional)
                - if this is a string, it specifies the property on the current instance from which the
                  target value is obtained
                - for a target type of string, this may be a list of strings; it specifies the properties
                  on the current instance from which the target value is constructed, by concatenating
                  source values
                - if this is not specified, then the entire current instance is used as the source of the
                  target value
            "separator": (mandatory, if "source" is a list of strings)
                - the separator string to be used for combining multiple source values
            "converter": (optional)
                - if this is a string, it indicates a conversion function (linked to a Python routine
                  in JSONValidator._converters), which takes the source value as input and returns the
                  target value
                - for a target type of string, a converter may be a dictionary defining a simple
                  vocabulary mapping, with key-value pairs specifying source-target keyword conversions
                - if a converter is used with target type array or object, then the items/properties
                  schemas are ignored
            "items": (mandatory, for a target type of array, unless a converter is used)
                - defines a nested "value" schema to be used for every item in the target array
            "properties": (mandatory, for a target type of object, unless a converter is used)
                - defines nested "value" schemas to be used for each property in the target object
        }
    """
    # NOTE: Python 2 code (``unicode``, ``basestring``, ``except Exception, e``,
    # ``iteritems``) — do not run under Python 3 without porting.
    # NOTE(review): the visible body only defines ``make_value``; the code that
    # applies it to "target" and uses map_params/instance/schema appears to be
    # truncated from this excerpt — confirm against the full module.
    def make_value(source_instance, **kwargs):
        # Build the target value for one "value" schema node; kwargs carries
        # the schema keywords (type/const/source/separator/converter/items/
        # properties). Keywords are pop()ed as they are consumed so that the
        # remaining kwargs can be passed on to recursive calls unchanged.
        def cast(val):
            # Coerce val to the Python type corresponding to the declared
            # JSON target type; unknown types are a schema (Syntax) error,
            # uncastable values are a data (Value) error.
            try:
                pytype = {
                    'string': unicode,
                    'integer': int,
                    'number': float,
                    'boolean': bool,
                    'array': list,
                    'object': dict,
                }[target_type]
            except KeyError:
                raise SyntaxError(_("Unsupported type {}".format(target_type)))
            try:
                return pytype(val)
            except (ValueError, TypeError):
                raise ValueError(
                    _("Unable to cast {} to {}".format(val, pytype)))

        # 'type' is read with get() (not popped) so the same target type
        # propagates into recursive make_value calls below.
        target_type = kwargs.get('type')
        if not target_type:
            raise SyntaxError(
                _("A 'type' must be specified for the target value"))

        if target_type == 'null':
            return None

        # A constant short-circuits everything else in the schema node.
        const = kwargs.pop('const', None)
        if const is not None:
            return cast(const)

        source_prop = kwargs.pop('source', None)
        if source_prop is not None:
            if type(source_instance) is not dict:
                raise SyntaxError(
                    _("The 'source' keyword can only be used with object instances"
                      ))
            # Single property name: recurse with that property's value.
            if isinstance(source_prop, basestring):
                if source_prop in source_instance:
                    return make_value(source_instance[source_prop], **kwargs)
                return None
            # List of property names: concatenate their string values.
            if type(source_prop) is list:
                if target_type != 'string':
                    raise SyntaxError(
                        _("The 'source' keyword can only specify a list of properties if the target type is 'string'"
                          ))
                separator = kwargs.pop('separator', None)
                if not isinstance(separator, basestring):
                    raise SyntaxError(
                        _("A 'separator' must be specified if 'source' is a list of properties"
                          ))
                result = []
                for source_prop_i in source_prop:
                    if source_prop_i in source_instance:
                        value = make_value(source_instance[source_prop_i],
                                           **kwargs)
                        # NOTE: truthiness test (not ``is not None``), so
                        # empty strings are dropped from the join —
                        # presumably intentional, to avoid empty segments.
                        if value:
                            result += [value]
                return separator.join(result)
            raise SyntaxError(_("Invalid value for 'source' keyword"))

        converter = kwargs.pop('converter', None)
        if converter is not None:
            # Dict converter: simple vocabulary mapping; unmapped values
            # pass through unchanged (stringified).
            if type(converter) is dict:
                if target_type != 'string':
                    raise SyntaxError(
                        _("A dictionary converter can only be used for a target type of 'string'"
                          ))
                return converter.get(unicode(source_instance),
                                     unicode(source_instance))
            # String converter: look up a registered conversion function.
            if isinstance(converter, basestring):
                if converter not in validator.converters:
                    raise SyntaxError(
                        _("Converter '{}' not found".format(converter)))
                converter_fn = validator.converters[converter]
                try:
                    return cast(converter_fn(source_instance))
                except Exception, e:
                    raise ValueError(
                        _("Unable to convert value using '{}' function: {}".
                          format(converter, e)))
            raise SyntaxError(_("Invalid value for 'converter' keyword"))

        if target_type == 'array':
            item_schema = kwargs.pop('items', None)
            if type(item_schema) is not dict:
                raise SyntaxError(
                    _("An 'items' dictionary must be defined for a target type of 'array'"
                      ))
            result = []
            # A non-list source yields a single-item array.
            if type(source_instance) is list:
                for source_item in source_instance:
                    value = make_value(source_item, **item_schema)
                    if value is not None:
                        result += [value]
            else:
                value = make_value(source_instance, **item_schema)
                if value is not None:
                    result = [value]
            return result

        if target_type == 'object':
            properties = kwargs.pop('properties', None)
            if type(properties) is not dict:
                raise SyntaxError(
                    _("A 'properties' dictionary must be defined for a target type of 'object'"
                      ))
            result = {}
            # Each target property is built from the SAME source instance;
            # properties whose value resolves to None are omitted.
            for prop_name, prop_schema in properties.iteritems():
                if type(prop_schema) is not dict:
                    raise SyntaxError(
                        _("The value for each property, for a target type of 'object', must be a dictionary"
                          ))
                value = make_value(source_instance, **prop_schema)
                if value is not None:
                    result[prop_name] = value
            return result

        # Scalar target with no const/source/converter: cast the whole
        # current instance.
        return cast(source_instance)
Example #43
0
 def dtype_from_stmt(self, stmt):
     """Return the NumPy dtype for a parsed type statement.

     Looks up ``TYPE_MAP`` by the lowercased statement class name and the
     declared length; a non-empty ``kind`` selector is not supported.
     """
     size, kind = stmt.selector
     assert not kind
     type_name = type(stmt).__name__.lower()
     return np.dtype(self.TYPE_MAP[(type_name, size)])
Example #44
0
# Build a lookup of numeric book ids -> book details, starting at id 100.
books = ['a', 'b', 'c', 'd']

books_dict = [{"name": 'a', 'writer': "testA"}, {"name": 'b', 'writer': "testB"},
              {"name": 'c', 'writer': "testC"}, {"name": 'd', 'writer': "testD"}]

detail_books_dict = {}
book_id = 100

# Bug fix: the original loop always inserted books_dict[1] for every id;
# pair each generated id with the matching book entry instead.
for book_info in books_dict:
    detail_books_dict.setdefault(book_id, book_info)
    book_id += 1

print(detail_books_dict)
Example #45
0
    def test_initial_full_table(self):
        """An initial sync (empty state) should full-table sync both binlog
        streams and record binlog position bookmarks for each.

        NOTE(review): the log_file/log_pos assertions read ``self.state``
        while ``do_sync`` was given the local ``state`` (and the version
        assertions use ``state``) — confirm the two are meant to differ.
        NOTE(review): ``expected_log_file``/``expected_log_pos`` are fetched
        but never asserted against — possibly leftover.
        """
        state = {}
        expected_log_file, expected_log_pos = binlog.fetch_current_log_file_and_pos(
            self.conn)

        # Collect the messages emitted by do_sync into the module-level list.
        global SINGER_MESSAGES
        SINGER_MESSAGES.clear()
        tap_mysql.do_sync(self.conn, {}, self.catalog, state)

        message_types = [type(m) for m in SINGER_MESSAGES]

        # Exact expected message sequence: a full-table pass (schema,
        # activate, records, activate) per stream, with state checkpoints.
        self.assertEqual(
            message_types,
            [
                singer.StateMessage,
                singer.SchemaMessage,
                singer.ActivateVersionMessage,
                singer.RecordMessage,
                singer.RecordMessage,
                singer.StateMessage,
                singer.ActivateVersionMessage,
                singer.StateMessage,
                singer.SchemaMessage,
                singer.ActivateVersionMessage,
                singer.RecordMessage,
                singer.RecordMessage,
                singer.StateMessage,
                singer.ActivateVersionMessage,
                singer.StateMessage,
            ],
        )

        # First ActivateVersionMessage for each stream.
        activate_version_message_1 = list(
            filter(
                lambda m: isinstance(m, singer.ActivateVersionMessage) and m.
                stream == "binlog_1",
                SINGER_MESSAGES,
            ))[0]

        activate_version_message_2 = list(
            filter(
                lambda m: isinstance(m, singer.ActivateVersionMessage) and m.
                stream == "binlog_2",
                SINGER_MESSAGES,
            ))[0]

        record_messages = list(
            filter(lambda m: isinstance(m, singer.RecordMessage),
                   SINGER_MESSAGES))

        # Binlog bookmarks must have been written for both streams.
        self.assertIsNotNone(
            singer.get_bookmark(self.state, "tap_mysql_test-binlog_1",
                                "log_file"))
        self.assertIsNotNone(
            singer.get_bookmark(self.state, "tap_mysql_test-binlog_1",
                                "log_pos"))

        self.assertIsNotNone(
            singer.get_bookmark(self.state, "tap_mysql_test-binlog_2",
                                "log_file"))
        self.assertIsNotNone(
            singer.get_bookmark(self.state, "tap_mysql_test-binlog_2",
                                "log_pos"))

        # The bookmarked version must match the emitted activate messages.
        self.assertEqual(
            singer.get_bookmark(state, "tap_mysql_test-binlog_1", "version"),
            activate_version_message_1.version,
        )

        self.assertEqual(
            singer.get_bookmark(state, "tap_mysql_test-binlog_2", "version"),
            activate_version_message_2.version,
        )
Example #46
0
    for i, c in enumerate(pattern):
        if c not in alphanum:
            if c == "\000":
                s[i] = "\\000"
            else:
                s[i] = "\\" + c
    return pattern[:0].join(s)


# --------------------------------------------------------------------
# internals

# Cache of compiled patterns, keyed by (pattern type, pattern, flags).
_cache = {}
# Cache of prepared replacement templates (for sub()/subn()).
# NOTE(review): usage is outside this excerpt — confirm.
_cache_repl = {}

# The concrete compiled-pattern type; compiling an empty pattern is the
# only way to name it.
_pattern_type = type(sre_compile.compile("", 0))

# Cache size bound; presumably the caches are flushed past this — confirm
# against the (truncated) _compile below.
_MAXCACHE = 100


def _compile(*key):
    # internal: compile pattern
    pattern, flags = key
    bypass_cache = flags & DEBUG
    if not bypass_cache:
        cachekey = (type(key[0]),) + key
        try:
            p, loc = _cache[cachekey]
            if loc is None or loc == _locale.setlocale(_locale.LC_CTYPE):
                return p
        except KeyError:
    def get_rank(self):
        """Return this card's rank."""
        return self.rank

    def draw(self, canvas, pos):
        """Draw this card on *canvas* with its top-left corner at *pos*.

        The source tile is presumably located inside the ``card_images``
        sprite sheet by rank/suit index — confirm against the image layout.
        """
        # Center of this card's tile within the source image.
        card_loc = (CARD_CENTER[0] + CARD_SIZE[0] * RANKS.index(self.rank), 
                    CARD_CENTER[1] + CARD_SIZE[1] * SUITS.index(self.suit))
        canvas.draw_image(card_images, card_loc, CARD_SIZE, [pos[0] + CARD_CENTER[0], pos[1] + CARD_CENTER[1]], CARD_SIZE)
    
###################################################
# Test code
# NOTE: Python 2 `print` statements; Card is defined earlier in the file.
# Exercises construction, accessors, and repr/str for three sample cards.

c1 = Card("S", "A")
print c1
print c1.get_suit(), c1.get_rank()
print type(c1)

c2 = Card("C", "2")
print c2
print c2.get_suit(), c2.get_rank()
print type(c2)

c3 = Card("D", "T")
print c3
print c3.get_suit(), c3.get_rank()
print type(c3)


###################################################
# Output to console
Example #48
0
    def test_binlog_stream(self):
        """Sync from existing binlog bookmarks: both streams should emit
        their inserted/updated rows (plus two delete events) and finish
        with refreshed log_file/log_pos bookmarks."""
        global SINGER_MESSAGES
        SINGER_MESSAGES.clear()

        config = test_utils.get_db_config()
        # server_id is presumably required to act as a replication client —
        # confirm against tap_mysql's binlog reader.
        config["server_id"] = "100"

        tap_mysql.do_sync(self.conn, config, self.catalog, self.state)
        record_messages = list(
            filter(lambda m: isinstance(m, singer.RecordMessage),
                   SINGER_MESSAGES))

        # Exact expected sequence: one schema per stream, ten records, and
        # state checkpoints at both ends.
        message_types = [type(m) for m in SINGER_MESSAGES]
        self.assertEqual(
            message_types,
            [
                singer.StateMessage,
                singer.SchemaMessage,
                singer.SchemaMessage,
                singer.RecordMessage,
                singer.RecordMessage,
                singer.RecordMessage,
                singer.RecordMessage,
                singer.RecordMessage,
                singer.RecordMessage,
                singer.RecordMessage,
                singer.RecordMessage,
                singer.RecordMessage,
                singer.RecordMessage,
                singer.StateMessage,
            ],
        )

        # Each tuple is (stream, id, updated, deleted?); the final flag is
        # True iff the record carries SDC_DELETED_AT (a binlog delete).
        self.assertEqual(
            [
                ("binlog_1", 1, "2017-06-01T00:00:00+00:00", False),
                ("binlog_1", 2, "2017-06-20T00:00:00+00:00", False),
                ("binlog_1", 3, "2017-09-22T00:00:00+00:00", False),
                ("binlog_2", 1, "2017-10-22T00:00:00+00:00", False),
                ("binlog_2", 2, "2017-11-10T00:00:00+00:00", False),
                ("binlog_2", 3, "2017-12-10T00:00:00+00:00", False),
                ("binlog_1", 3, "2018-06-18T00:00:00+00:00", False),
                ("binlog_2", 2, "2018-06-18T00:00:00+00:00", False),
                ("binlog_1", 2, "2017-06-20T00:00:00+00:00", True),
                ("binlog_2", 1, "2017-10-22T00:00:00+00:00", True),
            ],
            [(
                m.stream,
                m.record["id"],
                m.record["updated"],
                m.record.get(binlog.SDC_DELETED_AT) is not None,
            ) for m in record_messages],
        )

        # Binlog position bookmarks must be present for both streams.
        self.assertIsNotNone(
            singer.get_bookmark(self.state, "tap_mysql_test-binlog_1",
                                "log_file"))
        self.assertIsNotNone(
            singer.get_bookmark(self.state, "tap_mysql_test-binlog_1",
                                "log_pos"))

        self.assertIsNotNone(
            singer.get_bookmark(self.state, "tap_mysql_test-binlog_2",
                                "log_file"))
        self.assertIsNotNone(
            singer.get_bookmark(self.state, "tap_mysql_test-binlog_2",
                                "log_pos"))
Example #49
0
 def serialize_numpy(self, buff, numpy):
   """
   serialize message with numpy array types into buffer
   :param buff: buffer, ``StringIO``
   :param numpy: numpy python module
   """
   # Generated-serializer style: pack the seven float64 fields in
   # declaration order. _struct_7d is presumably a module-level
   # precompiled struct.Struct('<7d') — defined outside this excerpt.
   try:
     _x = self
     buff.write(_struct_7d.pack(_x.x, _x.y, _x.z, _x.r, _x.l_x, _x.l_y, _x.l_z))
   # Packing errors are routed through _check_types so callers get a
   # message naming the offending value rather than a raw struct error.
   except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
   except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
Example #50
0
	def __type__ (self):
		"""Return the type of the wrapped ``object`` attribute.

		NOTE(review): ``__type__`` is not a Python protocol method; it is
		only ever invoked explicitly by callers.
		"""
		return type(self.object)
Example #51
0
def shallowCopy(*args):
    """Return a new list of the positional arguments with 4 appended.

    Bug fix: ``args`` is a tuple, which has no ``append`` — the original
    raised AttributeError on every call. Copy into a list before mutating.
    """
    result = list(args)
    result.append(4)
    return result
Example #52
0
 def __init__(self, director_full_name: str):
     """Store the director's stripped full name, or None when the input
     is empty or not a string; start with an empty movie list."""
     # isinstance is the idiomatic type check (was: type(...) is not str).
     if not isinstance(director_full_name, str) or director_full_name == "":
         self.__director_full_name = None
     else:
         self.__director_full_name = director_full_name.strip()
     self.__movies = []
Example #53
0
def enum(*sequential, **named):
    """Create a simple enum class.

    Positional names are numbered 0..N-1, keyword names keep their given
    values, and ``reverse_mapping`` maps each value back to its name.
    """
    members = {name: index for index, name in enumerate(sequential)}
    members.update(named)
    # Build the reverse lookup before inserting it, so it excludes itself.
    members['reverse_mapping'] = {val: name for name, val in members.items()}
    return type('Enum', (), members)
Example #54
0
    def get_range(self,
                  sc,
                  client_ids,
                  range_start,
                  range_end,
                  limit=None,
                  parallelism=None,
                  reverse=False):
        """ Return RDD[client_id, [ping1, ..., pingK]] where pings are limited
        to a given activity period.

        The list of pings is sorted by activity date.

        :param sc: a SparkContext
        :param client_ids: the client ids represented as UUIDs
        :param range_start: the beginning of the time period represented as a datetime.date instance
        :param range_end: the end of the time period (inclusive) represented as a datetime.date instance
        :param limit: the maximum number of pings to return per client id
        :param parallelism: the number of partitions of the resulting RDD
        :param reverse: whether to return pings in reverse chronological order, defaults to False
        """
        if not isinstance(range_start, date):
            raise TypeError(
                'range_start must be a datetime.date, not {}'.format(
                    type(range_start)))

        if not isinstance(range_end, date):
            raise TypeError('range_end must be a datetime.date, not {}'.format(
                type(range_end)))

        if not isinstance(limit, (int, type(None))):
            raise TypeError(
                'limit must be either an int or None, not {}'.format(
                    type(limit)))

        # Convert to YYYYMMDD row-key suffixes; the inclusive range_end is
        # made exclusive by advancing one day.
        range_start = range_start.strftime("%Y%m%d")
        range_end = (range_end + timedelta(days=1)).strftime("%Y%m%d")

        if parallelism is None:
            parallelism = sc.defaultParallelism

        def _get_range(client_id, range_start, range_end, limit):
            # Runs on Spark executors: one scan per client_id, each task
            # opening (and closing) its own HBase connection.
            if not self._validate_client_id(client_id):
                raise ValueError("Invalid Client ID!")

            # Row keys are "<client_id>:<YYYYMMDD>".
            row_start = "{}:{}".format(client_id, range_start)
            row_stop = "{}:{}".format(client_id, range_end)

            # For a reverse scan the start/stop keys are swapped —
            # presumably what happybase's reverse scan expects; confirm.
            if reverse:
                row_start, row_stop = row_stop, row_start

            payloads = []
            with contextlib.closing(happybase.Connection(
                    self.hostname)) as connection:
                table = connection.table(self.tablename)
                for key, data in table.scan(row_start=row_start,
                                            row_stop=row_stop,
                                            limit=limit,
                                            columns=[self.column_family],
                                            reverse=reverse):
                    payloads.append(json.loads(data[self.column]))

            return (client_id, payloads)

        # partial() pins the range/limit; only client_id varies per element.
        return sc.parallelize(client_ids, parallelism)\
            .map(partial(_get_range, range_start=range_start, range_end=range_end, limit=limit))
Example #55
0
def _render_services_matrix(components,
                            hosts,
                            info_view_settings,
                            enable_legend=False):
    """Print a console matrix of per-host service states, followed by
    host-level "uptodate", "reboot required" and "host access" rows.

    NOTE(review): ``servicedef.keys()[0]`` relies on Python 2 (where
    dict.keys() returns a list); on Python 3 it raises and the except
    branch treats the servicedef itself as the service name.
    """
    # Resolve each requested host name to its component object: direct
    # lookup, then hostname/fqdn match, then the "host://" URI form.
    host_components = set()
    for host in hosts:
        found = components.get(host)
        if not found:
            for c in [
                    h for h in components.values()
                    if type(h) is yadtshell.components.Host
                    or type(h) is yadtshell.components.UnreachableHost
            ]:
                if getattr(c, 'hostname', None) == host:
                    found = c
                    break
                if getattr(c, 'fqdn', None) == host:
                    found = c
                    break
        if not found:
            found = components.get("host://%s" % host)
        if not found:
            print('ERROR: cannot find host %s' % host)
            continue
        host_components.add(found)
    hosts = sorted(host_components, key=lambda h: h.uri)

    # Collect every service seen on any host, paired with its dependency
    # score (used as the row sort rank).
    ranks = {}
    services = []
    for host in hosts:
        for servicedef in getattr(host, 'services', []):
            try:
                service = servicedef.keys()[0]
            except Exception:
                service = servicedef
            # NOTE(review): `services` holds (rank, name) tuples, so this
            # membership test against the bare name never matches and
            # duplicates get appended; the `ranks` dict below dedups them,
            # at the cost of redundant lookups.
            if service not in services:
                rank = components[yadtshell.uri.create(
                    yadtshell.settings.SERVICE, host.hostname,
                    service)].dependency_score
                services.append((rank, service))

    for rank, name in services:
        ranks[name] = rank

    # Header: render host names according to the requested view style.
    icons, separator = calc_icon_strings(info_view_settings)
    if 'maxcols' in info_view_settings:
        print('  %s' % separator.join(['%-9s' % host.host for host in hosts]))
    elif '3cols' in info_view_settings:

        def print_3cols(start, end):
            # One line showing a 3-character slice of every host name.
            line = []
            for name in [host.host for host in hosts]:
                line.append(name[start:end])
            print('   %s' % separator.join(['%3s' % string
                                            for string in line]))

        print_3cols(0, 3)
        print_3cols(3, 6)
        print_3cols(6, 9)
    else:
        # Default: print names vertically, blanking characters shared with
        # the previous name to reduce visual noise.
        last = None
        names = []
        max_len = 0
        for name in [host.host for host in hosts]:
            if not last:
                names.append(name)
                max_len = len(name)
            else:
                new_name = []
                for i in range(0, len(name)):
                    if name[i] == last[i]:
                        new_name.append(' ')
                    else:
                        new_name.append(name[i])
                names.append(separator.join(new_name))
            last = name
        for i in range(max_len):
            line = []
            for name in names:
                line.append(name[i])
            print('  %s' % ''.join(line))
    print()

    # One row per service, ordered by dependency score.
    for name in sorted(ranks, key=lambda x: ranks[x]):
        s = []
        for host in hosts:
            uri = yadtshell.uri.create(yadtshell.settings.SERVICE, host.host,
                                       name)
            service = components.get(uri, None)
            s.append(calc_state_string(service, icons))
            suffix = ''
            if getattr(service, 'is_frontservice', False):
                suffix = '(frontservice)'
        # NOTE(review): `suffix` reflects only the LAST host iterated, not
        # "any host" — confirm that is intended.
        print('  %s  service %s %s' % (separator.join(s), name, suffix))
    # "host uptodate" row.
    s = []
    for host in hosts:
        if not host.is_reachable():
            s.append(icons['UNKNOWN'])
        elif host.is_uptodate():
            s.append(icons['UPTODATE'])
        elif host.is_update_needed():
            s.append(icons['UPDATE_NEEDED'])
        else:
            s.append(icons['NA'])
    print('  %s  %s' % (separator.join(s), 'host uptodate'))

    # "reboot required" row.
    s = []
    for host in hosts:
        if not host.is_reachable():
            s.append(icons['UNKNOWN'])
        elif host.reboot_required_to_activate_latest_kernel:
            s.append(icons['REBOOT_NOW'])
        elif host.reboot_required_after_next_update:
            s.append(icons['REBOOT_AFTER_UPDATE'])
        else:
            s.append(icons['UP'])
    print('  %s  %s' % (separator.join(s), 'reboot required'))

    # "host access" (locking) row.
    s = []
    for host in hosts:
        if host.is_locked_by_other:
            s.append(icons['LOCKED_BY_OTHER'])
        elif host.is_locked_by_me:
            s.append(icons['LOCKED_BY_ME'])
        elif host.is_unknown():
            s.append(icons['UNKNOWN'])
        elif host.is_ignored:
            s.append(icons['UNKNOWN_IGNORED'])
        else:
            s.append(icons['NOT_LOCKED'])
    print('  %s  %s' % (separator.join(s), 'host access'))
    print()

    if enable_legend:
        render_legend(info_view_settings)
# messing with dictionaries during lecture
# Author: Sam Tracey

car = {
    "make": "Ford",
    "price": 123,
    "owner": {"first name": "Sam", "age": 44},
}
print(type(car))
print(car)

# Assigning to a new key mutates the dict in place.
car["model"] = "Focus"
print(car)

# Alternative lookups kept from the lecture notes:
# make = car["make"]
# notMake = car.get("aakakakakka")   # .get() returns None for missing keys
# print(type(notMake))
# print(make)

# Nested dicts are indexed one level at a time.
print(car["owner"]["age"])
def plus(A, B):
    """Return the element-wise sum of two equal-length lists.

    :raises TypeError: if either argument is not a list
    :raises ValueError: if the lists differ in length
    """
    # Explicit exceptions replace the original assert, which disappears
    # under ``python -O`` and gave no diagnostic message.
    if not (isinstance(A, list) and isinstance(B, list)):
        raise TypeError("plus() expects two lists")
    if len(A) != len(B):
        raise ValueError("plus() expects lists of equal length")
    return [a + b for a, b in zip(A, B)]
Example #58
0
    def whoami(self):
        """Print a self-introduction ("I'm a monster, who would I fear").

        The printed text is runtime output and is intentionally left in
        the original Chinese.
        """
        print('我是怪物我怕谁')


# Demo: exercise the Monster/Animals/Boss hierarchy (defined earlier in
# the file): construction with hp, run(), and the Boss-only whoami().
a1 = Monster(200)
print(a1.hp)
print(a1.run())
a2 = Animals(1)
print(a2.hp)
print(a2.run())

a3 = Boss(800)
a3.whoami()

# type() shows each instance's concrete class.
print('a1的类型 %s' % type(a1))
print('a2的类型 %s' % type(a2))
print('a3的类型 %s' % type(a3))

# isinstance also matches subclasses.
print(isinstance(a2, Monster))
#
# user1 = Player('tom',100,'war')  # class instantiation
# user2 = Player('jerry',90,'master')
# user1.print_role()
# user2.print_role()
#
#
# user1.updateName('wilson')
# user1.print_role()
# user1.__name = ('aaa')   # NOTE: would NOT touch the mangled _Player__name
# user1.print_role()
def squrt(A):
    """Return a new list with every element of *A* squared.

    (The misspelled name is kept for backward compatibility with callers.)

    :raises TypeError: if *A* is not a list
    """
    # Explicit exception replaces the original assert, which disappears
    # under ``python -O``.
    if not isinstance(A, list):
        raise TypeError("squrt() expects a list")
    return [x**2 for x in A]
Example #60
0
        t = args.get('topn', 10)
        pos = [] if pos == None else pos
        neg = [] if neg == None else neg
        t = 10 if t == None else t
        queryinfo = "positive: " + str(pos) + " negative: " + str(
            neg) + " topn: " + str(t)
        logger.info(queryinfo)
        logger.info("pos 0:" + pos[0])

        try:
            ress = model.most_similar_cosmul(positive=pos,
                                             negative=neg,
                                             topn=t)
            logger.info(ress)
            res = [word[0].encode('utf-8') for word in ress]
            logger.info(type(res))
            return res
        except Exception, e:
            print e
            print res


class Infer(Resource):
    def get(self):
        logger.info("Infer class begins:")
        parser = reqparse.RequestParser()
        parser.add_argument('words',
                            type=str,
                            required=True,
                            help="word to infer vector.")
        args = parser.parse_args()