Example #1
File: mask.py  Project: tjlane/miniana
 def mask_borders(self, num_pixels=1):
     """
     Mask the border of each ASIC, to a width of `num_pixels`.
     
     Parameters
     ----------
     num_pixels : int
         The size of the border region to mask.
     """
     
     print "Masking %d pixels around the border of each 2x1" % num_pixels
     
     n = int(num_pixels)        
     m = self._blank_mask()
     
     if (num_pixels < 0) or (num_pixels > 194):
         raise ValueError('`num_pixels` must be between 0 and 194')
     
     for i in range(4):
         for j in range(16):
             
             # mask along the y-dim
             m[i,j,:,0:n] = np.bool(False)
             m[i,j,:,194-n:194] = np.bool(False)
             
             # mask along the x-dim
             m[i,j,0:n,:] = np.bool(False)
             m[i,j,185-n:185,:] = np.bool(False)
             
             # # mask a bar along y in the middle of the 2x1
             # m[i,j,:,194-n:194+n] = np.bool(False)
     
     self._inject_mask('border', m, override_previous=True)
     
     return
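A minimal self-contained sketch of the same border-masking idea on a bare NumPy array; the (4, 16, 185, 194) shape and the function name here are illustrative, not part of the original class:

import numpy as np

def mask_asic_borders(mask, num_pixels=1):
    # Clear a `num_pixels`-wide frame on every 185x194 ASIC of the mask.
    n = int(num_pixels)
    if not (0 <= n <= 194):
        raise ValueError('`num_pixels` must be between 0 and 194')
    m = mask.copy()
    m[..., :, :n] = False          # left columns
    m[..., :, 194 - n:] = False    # right columns
    m[..., :n, :] = False          # top rows
    m[..., 185 - n:, :] = False    # bottom rows
    return m

blank = np.ones((4, 16, 185, 194), dtype=bool)
bordered = mask_asic_borders(blank, num_pixels=2)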
Example #2
 def test_numpy(self):
     """NumPy objects get serialized to readable JSON."""
     l = [
         np.float32(12.5),
         np.float64(2.0),
         np.float16(0.5),
         np.bool(True),
         np.bool(False),
         np.bool_(True),
         np.unicode_("hello"),
         np.byte(12),
         np.short(12),
         np.intc(-13),
         np.int_(0),
         np.longlong(100),
         np.intp(7),
         np.ubyte(12),
         np.ushort(12),
         np.uintc(13),
         np.ulonglong(100),
         np.uintp(7),
         np.int8(1),
         np.int16(3),
         np.int32(4),
         np.int64(5),
         np.uint8(1),
         np.uint16(3),
         np.uint32(4),
         np.uint64(5),
     ]
     l2 = [l, np.array([1, 2, 3])]
     roundtripped = loads(dumps(l2, cls=EliotJSONEncoder))
     self.assertEqual([l, [1, 2, 3]], roundtripped)
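For context, a minimal NumPy-aware JSON encoder along the same lines; this is an illustrative stand-in, not Eliot's actual EliotJSONEncoder implementation:

import json
import numpy as np

class NumpyJSONEncoder(json.JSONEncoder):
    def default(self, o):
        if isinstance(o, np.generic):    # any NumPy scalar (ints, floats, bools, ...)
            return o.item()              # convert to the nearest builtin Python type
        if isinstance(o, np.ndarray):
            return o.tolist()
        return super().default(o)

print(json.dumps([np.float32(12.5), np.bool_(True), np.array([1, 2, 3])],
                 cls=NumpyJSONEncoder))   # prints [12.5, true, [1, 2, 3]]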
Example #3
File: parsenml.py  Project: pyIPP/trgui
def parsenml(nml, parname, fmt=5):


    val = []

    for line in open(nml):
        if len(line.strip()) > 0:
            if line.strip()[0] != '!':
                tmp = line.split('=')
                vname = tmp[0].split('(')[0].strip()
                if parname.upper() == vname.upper():
                    valstr = tmp[1].split('!')[0].strip()

                    if fmt in char_list:
                        val.append( valstr.replace("'", "") )
                    if fmt == 3 or fmt == 4:
                        for entry in valstr.split(','):
                            val.append(np.int32(entry))
                    if fmt == 7:
                        for entry in valstr.split(','):
                            if entry.upper().strip() in ['F', '.F.', 'FALSE', '.FALSE.']:
                                val.append( np.bool(False))
                            else:
                                val.append(np.bool(True))
                    if fmt == 5:
                        for entry in valstr.split(','):
                            val.append( np.float32(entry))

    if fmt in char_list:
        return val
    else:
        return np.array(val)
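A hypothetical round trip; the file name, variable names, and values below are made up, and `char_list` is assumed to be defined elsewhere in parsenml.py:

import numpy as np

with open('example.nml', 'w') as f:
    f.write('NSHOT = 35561          ! shot number\n')
    f.write('DT    = 0.01, 0.02     ! time steps\n')

print(parsenml('example.nml', 'nshot', fmt=3))   # -> [35561]
print(parsenml('example.nml', 'dt', fmt=5))      # -> [0.01 0.02]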
Example #4
def test_sequence_numpy_boolean(seq):
    expected = [np.bool(True), None, np.bool(False), None]
    arr = pa.array(seq(expected))
    assert len(arr) == 4
    assert arr.null_count == 2
    assert arr.type == pa.bool_()
    assert arr.to_pylist() == expected
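A standalone version of the same check, spelled with the non-deprecated np.bool_ scalar so it also runs on current NumPy (assumes pyarrow is installed):

import numpy as np
import pyarrow as pa

arr = pa.array([np.bool_(True), None, np.bool_(False), None])
print(arr.type)         # bool
print(arr.null_count)   # 2
print(arr.to_pylist())  # [True, None, False, None]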
Example #5
    def clean_manifold(self):
        current_status = M.heman.get_manifold_status_bits()
        noof_cleans = np.int(self.spinBox_noof_cleans.value())
        print(noof_cleans)
        self.label_cleaning_status.setText("<font style='color: %s'>%s</font>"%('Red', "Cleaning..."))
        gui.QApplication.processEvents()
        M.heman.clean_manifold(noof_cleans)

        # Leave the manifold as we started
        M.heman.set_gas(np.bool(current_status[0]))
        M.heman.set_pump(np.bool(current_status[1]))
        M.heman.set_cryostat(np.bool(current_status[2]))

        self.set_heman_labels()
        self.label_cleaning_status.setText("<font style='color: %s'>%s</font>"%('Black', "Cleaning completed"))
Example #6
File: QclVariable.py  Project: hagisgit/qcl
 def set_array(self, value):
     """calls set_array and _set_buffer"""
     #if defined as scalar variable, set the array and the buffer to a single numpy value, according to specified type
     if (self._addspc == 'scalar') and (isinstance(value, int) or isinstance(value, long) or isinstance(value, float)):
         if (self._dtypestr == 'int'):
             self._array = np.int32(value)
         elif (self._dtypestr == 'uint'):
             self._array = np.uint32(value)
         elif (self._dtypestr == 'long'):
             self._array = np.int64(value)
         elif (self._dtypestr == 'bool'):
             self._array = np.bool(value)
         elif (self._dtypestr == 'real'):
             if bool(self._solverobj.cldevice.get_info(cl.device_info.DOUBLE_FP_CONFIG)):
                 self._array = np.float64(value)
             else:
                 self._array = np.float32(value)
         self._set_buffer(self._array)
     elif (self._addspc == '__local'):
         raise ValueError(value, '__local defined variables cannot be set from the host device.')
     #if defined as __global, input must match the variable's type, that is: numpy array's dtype and shape[1], 
     # which defines the specific vector length (real, real4, ...)
     elif isinstance(value, np.ndarray):
         if (len(value.shape) == 1):
             value.shape = (value.shape[0], 1)
         if (value.shape[1] == self._array.shape[1]):
             self._array = value.astype(self._array.dtype, copy=True)
             #self._array = np.zeros(value.shape, value.dtype)
             #self._array += value
             self.set_length(value.shape[0])
             #self._set_buffer(cl.Buffer(self._solverobj.clcontext, cl.mem_flags.READ_WRITE | cl.mem_flags.COPY_HOST_PTR, hostbuf=self._array)) #not needed, called in set_length            
         else:
             raise ValueError(value, 'is not a valid value for this variable. Must be a numpy array of fitting shape and dtype or a number for scalar variables.')
     else:
         raise ValueError(value, 'is not a valid value for this variable. Must be a numpy array.')
Example #7
def readData(dataFile):

# read the data sets
# each line has one utterance that contains tab separated utterance words and corresponding IOB tags
# if the input is multiturn session data, the flag following the IOB tags is 1 (session start) or 0 (not session start)
 
	utterances = list()
	tags = list()
	starts = list()
	startid = list()

	# reserving index 0 for padding
	# reserving index 1 for unknown word and tokens
	word_vocab_index = 2
	tag_vocab_index = 2
	word2id = {'<pad>': 0, '<unk>': 1}
	tag2id = {'<pad>': 0, '<unk>': 1}
	id2word = ['<pad>', '<unk>']
	id2tag = ['<pad>', '<unk>']

	utt_count = 0
	temp_startid = 0
	for line in open(dataFile, 'r'):
		d=line.split('\t')
		utt = d[0].strip()
		t = d[1].strip()
		if len(d) > 2:
			start = np.bool(int(d[2].strip()))
			starts.append(start)
			if start:
				temp_startid = utt_count
			startid.append(temp_startid)
		#print 'utt: %s, tags: %s' % (utt,t) 

		temp_utt = list()
		temp_tags = list()
		mywords = utt.split()
		mytags = t.split()
		if len(mywords) != len(mytags):
			print mywords
			print mytags
		# now add the words and tags to word and tag dictionaries
		# also save the word and tag sequence in training data sets
		for i in xrange(len(mywords)):
			if mywords[i] not in word2id:
				word2id[mywords[i]] = word_vocab_index
				id2word.append(mywords[i])
				word_vocab_index += 1
			if mytags[i] not in tag2id:
				tag2id[mytags[i]] = tag_vocab_index
				id2tag.append(mytags[i])
				tag_vocab_index += 1
			temp_utt.append(word2id[mywords[i]])
			temp_tags.append(tag2id[mytags[i]])
		utt_count += 1
		utterances.append(temp_utt)
		tags.append(temp_tags)

	data = {'start': starts, 'startid': startid, 'utterances': utterances, 'tags': tags, 'uttCount': utt_count, 'id2word':id2word, 'id2tag':id2tag, 'wordVocabSize' : word_vocab_index, 'tagVocabSize': tag_vocab_index, 'word2id': word2id, 'tag2id':tag2id}
	return data
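For reference, a made-up input line in the format described by the comments above (tab-separated utterance, IOB tags, and an optional session-start flag):

line = 'show flights to boston\tO O O B-city\t1'
utt, t, flag = line.split('\t')
# utt  -> 'show flights to boston'
# t    -> 'O O O B-city'
# flag -> '1' (readData converts this to a session-start boolean via np.bool(int(flag)))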
Example #8
File: tests.py  Project: fperez/regreg
def test_deltaSoft(n=1):

    p = 1000
    ctype = np.zeros(4)
    for i in range(n):
        v = np.random.normal(0, 10, p)
        normalize = np.bool(np.sign(np.random.normal()) + 1)
        if normalize:
            v = v / np.sqrt(np.sum(v ** 2))
        l = np.max([np.fabs(np.random.normal(np.sum(np.fabs(v)), 50)), 1.0])
        vec = scca.deltaSoft(v, l, normalize)
        if normalize:
            n2vec = np.sum(vec ** 2)
            assert np.fabs(n2vec - 1.0) < 1e-8
        n1v = np.sum(np.fabs(v))
        if n1v <= l:
            if normalize:
                ctype[0] += 1
            else:
                ctype[1] += 1
            assert np.sum((v - vec) ** 2) < 1e-8
        else:
            if normalize:
                ctype[2] += 1
            else:
                ctype[3] += 1
            n1vec = np.sum(np.fabs(vec))
            assert np.sum((n1vec - l) ** 2) < 1e-8
Example #9
File: optimize.py  Project: tjlane/miniana
    def _slice(self, bin_centers, bin_values):
        """
        slice out only the requested parts of the radial profile
        """

        rmin = np.min( self.radius_range )
        rmax = np.max( self.radius_range )
        if (not np.any(bin_centers > rmax)) or (not np.any(bin_centers < rmin)):
            raise ValueError('Invalid radius range -- out of bounds of measured radii.')
        
        if len(self.radius_range) > 0:
            include = np.zeros( len(bin_values), dtype=np.bool)
            for i in range(0, len(self.radius_range), 2):
                inds = (bin_centers > self.radius_range[i]) * \
                       (bin_centers < self.radius_range[i+1])
                include[inds] = np.bool(True)

            if np.sum(include) == 0:
                raise RuntimeError('`radius_range` slices were not big enough to '
                                   'include any data!')

            bin_centers = bin_centers[include]
            bin_values  = bin_values[include]
            
        return bin_centers, bin_values
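A self-contained sketch of the same range-slicing logic with made-up names and data; radius_range is a flat list of (min, max) pairs, as in the method above:

import numpy as np

def slice_by_ranges(bin_centers, bin_values, radius_range):
    include = np.zeros(len(bin_values), dtype=bool)
    for lo, hi in zip(radius_range[0::2], radius_range[1::2]):
        include |= (bin_centers > lo) & (bin_centers < hi)
    return bin_centers[include], bin_values[include]

r = np.linspace(0.0, 10.0, 101)
v = np.sin(r)
rc, vc = slice_by_ranges(r, v, [2.0, 3.0, 7.0, 8.0])   # keep only the 2-3 and 7-8 bands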
Example #10
File: format.py  Project: KevinKSY/TEA
def printout(str, it_num = False):
    '''
    Prints an iteration progress number or other information on one line of
    the terminal.

    Parameters
    ----------
    str:    string
            String defining what will be printed.
    it_num: integer 
            Iteration number to be printed. If False, will print out 
            contents of str instead.

    Returns
    -------
    None
    '''

    # Create print-out for terminal that can be overwritten
    stdout.write('\r\n')
    if np.bool(it_num):
        # Print iteration number
        stdout.write(str % it_num)
    else:
        # Print other input
        stdout.write(str)
    
    # Clear printed value to allow overwriting for next
    stdout.flush()
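Hypothetical calls (the format string and iteration number are made up). Note that the branch test is np.bool(it_num), so passing it_num=0 falls through to the plain-string branch exactly like the default it_num=False:

printout('Iteration %d', it_num=42)     # writes 'Iteration 42' on its own line
printout('Maximum iteration reached.')  # writes the string itself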
Example #11
def induct(x):
    """Compute Copperhead type of an input, also convert data structure"""
    from . import cudata
    if isinstance(x, cudata.cuarray):
        return (conversions.back_to_front_type(x.type), x)
    if isinstance(x, np.ndarray):
        induced = cudata.cuarray(x)
        return (conversions.back_to_front_type(induced.type), induced)
    if isinstance(x, np.float32):
        return (coretypes.Float, x)
    if isinstance(x, np.float64):
        return (coretypes.Double, x)
    if isinstance(x, np.int32):
        return (coretypes.Int, x)
    if isinstance(x, np.int64):
        return (coretypes.Long, x)
    if isinstance(x, np.bool):
        return (coretypes.Bool, x)
    if isinstance(x, list):
        induced = cudata.cuarray(np.array(x))
        return (conversions.back_to_front_type(induced.type), induced)
    if isinstance(x, float):
        #Treat Python floats as double precision
        return (coretypes.Double, np.float64(x))
    if isinstance(x, int):
        #Treat Python ints as 64-bit ints (following numpy)
        return (coretypes.Long, np.int64(x))
    if isinstance(x, bool):
        return (coretypes.Bool, np.bool(x))
    if isinstance(x, tuple):
        sub_types, sub_elements = zip(*(induct(y) for y in x))
        return (coretypes.Tuple(*sub_types), tuple(sub_elements))
    #Can't digest this input
    raise ValueError("This input is not convertible to a Copperhead data structure: %r" % x)
Example #12
    def test_is_number(self):

        self.assertTrue(is_number(True))
        self.assertTrue(is_number(1))
        self.assertTrue(is_number(1.1))
        self.assertTrue(is_number(1 + 3j))
        self.assertTrue(is_number(np.bool(False)))
        self.assertTrue(is_number(np.int64(1)))
        self.assertTrue(is_number(np.float64(1.1)))
        self.assertTrue(is_number(np.complex128(1 + 3j)))
        self.assertTrue(is_number(np.nan))

        self.assertFalse(is_number(None))
        self.assertFalse(is_number('x'))
        self.assertFalse(is_number(datetime(2011, 1, 1)))
        self.assertFalse(is_number(np.datetime64('2011-01-01')))
        self.assertFalse(is_number(Timestamp('2011-01-01')))
        self.assertFalse(is_number(Timestamp('2011-01-01',
                                             tz='US/Eastern')))
        self.assertFalse(is_number(timedelta(1000)))
        self.assertFalse(is_number(Timedelta('1 days')))

        # questionable
        self.assertFalse(is_number(np.bool_(False)))
        self.assertTrue(is_number(np.timedelta64(1, 'D')))
Example #13
 def parse_value(self,value):
     '''parses and casts the raw value into an acceptable format for __value
     lot of defense here, so we can make assumptions later
     '''
     if isinstance(value,list):
         print 'util_2d: casting list to array'
         value = np.array(value)
     if isinstance(value,bool):
         if self.dtype == np.bool:
             try:
                 value = np.bool(value)
                 return value
             except:
                 raise Exception('util_2d:could not cast '+\
                     'boolean value to type "np.bool": '+str(value))
         else:
             raise Exception('util_2d:value type is bool, '+\
                ' but dtype not set as np.bool') 
     if isinstance(value,str):
         if self.dtype == np.int:
             try:
                 value = int(value)
             except:
                 assert os.path.exists(value),'could not find file: '+str(value)
                 return value
         else:
             try:
                 value = float(value)
             except:
                 assert os.path.exists(value),'could not find file: '+str(value)
                 return value
     if np.isscalar(value):
         if self.dtype == np.int:
             try:
                 value = np.int(value)
                 return value
             except:
                 raise Exception('util_2d:could not cast scalar '+\
                     'value to type "int": '+str(value))
         elif self.dtype == np.float32:
             try:
                 value = np.float32(value)
                 return value
             except:
                 raise Exception('util_2d:could not cast '+\
                     'scalar value to type "float": '+str(value))
         
     if isinstance(value,np.ndarray):
         if self.shape != value.shape:
             raise Exception('util_2d:self.shape: '+str(self.shape)+\
                 ' does not match value.shape: '+str(value.shape))
         if self.dtype != value.dtype:
             print 'util_2d:warning - casting array of type: '+\
                 str(value.dtype)+' to type: '+str(self.dtype)
         return value.astype(self.dtype)
     
     else:
         raise Exception('util_2d:unsupported type in util_array: '\
             +str(type(value))) 
Example #14
File: parse.py  Project: shenglan0407/thor
    def _pilatus_mask(self, border_size=3):
        """
        The pixels on the edges of the detector are often noisy -- this function
        provides a way to mask both the gaps and these border pixels.

        Parameters
        ----------
        border_size : int
            The size of the border (in pixels) with which to extend the mask
            around the detector gaps.
        """

        border_size = int(border_size)
        mask = np.ones(self.intensities_shape, dtype=np.bool)

        # below we have the cols (x_gaps) and rows (y_gaps) to mask
        # these mask the ASIC gaps

        x_gaps = [(194-border_size,  212+border_size),
                  (406-border_size,  424+border_size),
                  (618-border_size,  636+border_size),
                  (830-border_size,  848+border_size),
                  (1042-border_size, 1060+border_size),
                  (1254-border_size, 1272+border_size),
                  (1466-border_size, 1484+border_size),
                  (1678-border_size, 1696+border_size),
                  (1890-border_size, 1908+border_size),
                  (2102-border_size, 2120+border_size),
                  (2314-border_size, 2332+border_size)]
                  
        y_gaps = [(486-border_size,  494+border_size),
                  (980-border_size,  988+border_size),
                  (1474-border_size, 1482+border_size),
                  (1968-border_size, 1976+border_size)]

        for x in x_gaps:
            mask[x[0]:x[1],:] = np.bool(False)

        for y in y_gaps:
            mask[:,y[0]:y[1]] = np.bool(False)
        
            
        # we also mask the beam stop for 12-2...
        mask[1200:1325,1164:] = np.bool(False)

        return mask
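An illustrative use of such a boolean mask on intensity data; the 2527 x 2463 shape is assumed here (typical of a Pilatus 6M image) and the intensities are random:

import numpy as np

intensities = np.random.poisson(5.0, size=(2527, 2463)).astype(float)
mask = np.ones(intensities.shape, dtype=bool)
mask[194 - 3:212 + 3, :] = False    # one masked ASIC-gap stripe, as in x_gaps above
valid_pixels = intensities[mask]    # 1D array holding only the unmasked pixel values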
Example #15
File: format.py  Project: obowman/TEA
def printout(str, it_num = False):
    '''
    Used to print iteration progress number.
    '''
    stdout.write('\r\n')
    if np.bool(it_num):
        stdout.write(str % it_num)
    else:
        stdout.write(str)
    stdout.flush()
Example #16
 def test_int(self):
     self.assert_equal_with_lambda_check(_flexible_type(1), 1)
     self.assert_equal_with_lambda_check(_flexible_type(1L), 1)
     self.assert_equal_with_lambda_check(_flexible_type(True), 1)
     self.assert_equal_with_lambda_check(_flexible_type(False), 0)
     # numpy types
     self.assert_equal_with_lambda_check(_flexible_type(np.int_(1)), 1)
     self.assert_equal_with_lambda_check(_flexible_type(np.int64(1)), 1)
     self.assert_equal_with_lambda_check(_flexible_type(np.int32(1)), 1)
     self.assert_equal_with_lambda_check(_flexible_type(np.int16(1)), 1)
     self.assert_equal_with_lambda_check(_flexible_type(np.uint64(1)), 1)
     self.assert_equal_with_lambda_check(_flexible_type(np.uint32(1)), 1)
     self.assert_equal_with_lambda_check(_flexible_type(np.uint16(1)), 1)
     self.assert_equal_with_lambda_check(_flexible_type(np.bool(1)), 1)
     self.assert_equal_with_lambda_check(_flexible_type(np.bool(0)), 0)
     self.assert_equal_with_lambda_check(_flexible_type(np.bool_(1)), 1)
     self.assert_equal_with_lambda_check(_flexible_type(np.bool_(0)), 0)
     self.assert_equal_with_lambda_check(_flexible_type(np.bool8(1)), 1)
     self.assert_equal_with_lambda_check(_flexible_type(np.bool8(0)), 0)
Example #17
File: fabioh5.py  Project: vallsv/silx
    def _convert_metadata_vector(self, values):
        """Convert a list of numpy data into a numpy array with the better
        fitting type."""
        converted = []
        types = set([])
        has_none = False
        for v in values:
            if v is None:
                converted.append(None)
                has_none = True
            else:
                c = self._convert_value(v)
                converted.append(c)
                types.add(c.dtype)

        if has_none and len(types) == 0:
            # That's a list of none values
            return numpy.array([0] * len(values), numpy.int8)

        result_type = numpy.result_type(*types)

        if issubclass(result_type.type, numpy.string_):
            # use the raw data to create the array
            result = values
        elif issubclass(result_type.type, numpy.unicode_):
            # use the raw data to create the array
            result = values
        else:
            result = converted

        result_type = self._normalize_vector_type(result_type)

        if has_none:
            # Fix missing data according to the array type
            if result_type.kind == "S":
                none_value = b""
            elif result_type.kind == "U":
                none_value = u""
            elif result_type.kind == "f":
                none_value = numpy.float("NaN")
            elif result_type.kind == "i":
                none_value = numpy.int(0)
            elif result_type.kind == "u":
                none_value = numpy.int(0)
            elif result_type.kind == "b":
                none_value = numpy.bool(False)
            else:
                none_value = None

            for index, r in enumerate(result):
                if r is not None:
                    continue
                result[index] = none_value

        return numpy.array(result, dtype=result_type)
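The central NumPy call above is numpy.result_type, which applies the promotion rules to the collected dtypes; a quick standalone illustration:

import numpy as np

print(np.result_type(np.int8, np.float32))   # float32
print(np.result_type(np.bool_, np.uint8))    # uint8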
Example #18
def processUDPMessage(m,rc):

    l = re.split(',', m)
    print l

    if not len(l) == 8:
        return False

    rc.header.frame_id = "agent_" + str(g_agent_number)
    rc.header.stamp = rospy.Time.now()
    rc.velocity.linear.x = float(l[0])
    rc.velocity.linear.y = float(l[1])
    rc.velocity.angular.z = float(l[2])
    rc.kick_power = np.uint8(l[3])
    rc.high_kick = np.bool(np.uint8(l[4]))
    rc.low_kick = np.bool(np.uint8(l[5]))
    rc.grabber_left_speed = np.uint8(l[6])
    rc.grabber_right_speed = np.uint8(l[7])

    return True
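A made-up message string in the comma-separated format this parser expects, with eight fields: linear x/y velocity, angular z velocity, kick power, the high/low kick flags, and the two grabber speeds:

m = '0.5,0.0,0.1,30,1,0,100,100'
fields = m.split(',')
# float(fields[0])             -> 0.5   (velocity.linear.x)
# np.uint8(fields[3])          -> 30    (kick_power)
# np.bool(np.uint8(fields[4])) -> True  (high_kick)
# np.bool(np.uint8(fields[5])) -> False (low_kick)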
Example #19
File: test_xray.py  Project: tjlane/thor
    def test_polar_mask_conversion(self):

        # make a real mask that is a circle, and then it should be easy to check
        # that the polar mask is correct
        
        q_cutoff_index = 5
        q_values = np.arange(3.5, 4.5, 0.1)
        q_cutoff = q_values[q_cutoff_index]
        
        rm = np.ones(self.d.num_pixels, dtype=np.bool)
        rm[self.d.recpolar[:,0] < q_cutoff] = np.bool(False)
        
        shot = xray.Shotset(self.i, self.d, mask=rm)
        r = shot.to_rings(q_values)
        
        ref_pm = np.ones((len(q_values), r.num_phi), dtype=np.bool)
        ref_pm[:q_cutoff_index+1,:] = np.bool(False)
        
        print "num px masked", ref_pm.sum(), r.polar_mask.sum()
        assert ref_pm.sum() == r.polar_mask.sum() # same num px masked
        assert_array_equal(ref_pm, r.polar_mask)
Example #20
 def __init__(self, par_path):
     self.par_path = par_path
     TrackingParams = par.TrackingParams(path=self.par_path)
     TrackingParams.read()
     self.dvxmin = TrackingParams.dvxmin
     self.dvxmax = TrackingParams.dvxmax
     self.dvymin = TrackingParams.dvymin
     self.dvymax = TrackingParams.dvymax
     self.dvzmin = TrackingParams.dvzmin
     self.dvzmax = TrackingParams.dvzmax
     self.angle = TrackingParams.angle
     self.dacc = TrackingParams.dacc
     self.flagNewParticles = n.bool(TrackingParams.flagNewParticles)
Example #21
File: io.py  Project: MickaelRigault/snbias
def gethost_our_morphology_data():
    """
    This is a compilation for some SNe~Ia (nearby ones)
      It is mainly based on Asiago, but Greg Aldering made some
    """
    dico = {}
    datas = [l for l in open(_fileMorphoCompil).read().splitlines() if l[0] != "#"]
    for l in datas:
        A = l.split()
        dico_ = {
            "object":A[0],
            "zcmb":np.float(A[1]),
            "inPS1": np.bool(np.int(A[2])),
            "inUn3": np.bool(np.int(A[3])),
            "inK14": np.bool(np.int(A[4])),
            "inR14": np.bool(np.int(A[5])),
            "Fchart":np.bool(np.int(A[6])),
            "Ra":np.float(A[7].split(",")[0]),"Dec":np.float(A[8].split(",")[0]),
            "morphology":A[9]
            }
        dico[dico_["object"]] = dico_
    return dico
Example #22
File: test_gbq.py  Project: akloster/pandas
 def test_type_conversion(self):
     # All BigQuery Types should be cast into appropriate numpy types
     sample_input = [('1.095292800E9', 'TIMESTAMP'),
              ('false', 'BOOLEAN'),
              ('2', 'INTEGER'),
              ('3.14159', 'FLOAT'),
              ('Hello World', 'STRING')]
     actual_output = [gbq._parse_entry(result[0],result[1]) for result in sample_input]
     sample_output = [np.datetime64('2004-09-16T00:00:00.000000Z'),
               np.bool(False),
               np.int('2'),
               np.float('3.14159'),
               'Hello World']
     self.assertEqual(actual_output, sample_output, 'A format conversion failed')
Example #23
def testVisibility():
    tdata = create_test_timeseries()
    n_times = tdata.shape[1]
    vg = VisibilityGraph(tdata[1], timings=tdata[0])
    # Choose two different, not neighbouring random nodes i, j
    node1, node2 = 0, 0
    while (abs(node2-node1)<=1):
        node1 = np.int(np.floor(np.random.rand()*n_times))
        node2 = np.int(np.floor(np.random.rand()*n_times))
    time, val = tdata
    i, j = min(node1, node2), max(node1, node2)
    testfun = lambda k: np.less((val[k]-val[i])/(time[k]-time[i]),
                                (val[j]-val[i])/(time[j]-time[i]))
    test = np.bool(np.sum(~np.array(map(testfun, xrange(i+1,j)))))
    assert np.invert(test) == vg.visibility(node1, node2)
Example #24
 def test_numpy_scalar_conversion_dtypes(self):
     self.assertEqual(nd.ndarray(np.bool_(True)).dtype, nd.bool)
     self.assertEqual(nd.ndarray(np.bool(True)).dtype, nd.bool)
     self.assertEqual(nd.ndarray(np.int8(100)).dtype, nd.int8)
     self.assertEqual(nd.ndarray(np.int16(100)).dtype, nd.int16)
     self.assertEqual(nd.ndarray(np.int32(100)).dtype, nd.int32)
     self.assertEqual(nd.ndarray(np.int64(100)).dtype, nd.int64)
     self.assertEqual(nd.ndarray(np.uint8(100)).dtype, nd.uint8)
     self.assertEqual(nd.ndarray(np.uint16(100)).dtype, nd.uint16)
     self.assertEqual(nd.ndarray(np.uint32(100)).dtype, nd.uint32)
     self.assertEqual(nd.ndarray(np.uint64(100)).dtype, nd.uint64)
     self.assertEqual(nd.ndarray(np.float32(100.)).dtype, nd.float32)
     self.assertEqual(nd.ndarray(np.float64(100.)).dtype, nd.float64)
     self.assertEqual(nd.ndarray(np.complex64(100j)).dtype, nd.cfloat32)
     self.assertEqual(nd.ndarray(np.complex128(100j)).dtype, nd.cfloat64)
Example #25
def load_attributes (path, subj, task,  **kwargs):
    ## Should return attr and a code to check if loading has been exploited #####
    
    #Default header struct
    header = ['targets', 'chunks']
    
    for arg in kwargs:
        if (arg == 'sub_dir'):
            sub_dirs = kwargs[arg].split(',')
        if (arg == 'event_file'):
            event_file = kwargs[arg]
        if (arg == 'event_header'):
            header = kwargs[arg].split(',')
            # If there is only a single item, treat it as a boolean
            if len(header) == 1:
                header = np.bool(header[0])
            
    
    directory_list = add_subdirs(path, subj, sub_dirs)
    
    attribute_list = []
    
    logger.debug(directory_list)
    
    for d in directory_list:
        temp_list = os.listdir(d)
        attribute_list += [os.path.join(d,f) for f in temp_list if f.find(event_file) != -1]
        
        
    logger.debug(attribute_list)
    
    # Small check
    if len(attribute_list) > 2:
        attribute_list = [f for f in attribute_list if f.find(subj) != -1]
        
    
    if len(attribute_list) == 0:
        logger.error('ERROR: No attribute file found!')
        logger.error( 'Checked in '+str(directory_list))
        return None
    
    
    logger.debug(header)
    
    attr_fname = attribute_list[0]
    
    attr = SampleAttributes(attr_fname, header=header)
    return attr
Example #26
File: test_io.py  Project: desihub/desiutil
 def test_yamlify(self):
     """Test yamlify
     """
     fdict = {'name':'test', 'num':np.int32(3), 1: 'expid', 'flt32':np.float32(3.), 'flt64':np.float64(2.),
                  'num2':np.int64(4), 'bool':np.bool(True), 'lst':['tst2', np.int16(2)],
                  'tup':(1,3), 'dct':{'a':'tst3', 'b':np.float32(6.)}, 'array': np.zeros(10)}
     if sys.version_info >= (3,0,0):
         self.assertIsInstance(fdict['name'], str)
     else:
         self.assertIsInstance(fdict['name'], unicode)
     # Run
     ydict = yamlify(fdict)
     self.assertIsInstance(ydict['flt32'], float)
     self.assertIsInstance(ydict['array'], list)
     for key in ydict.keys():
         if isinstance(key, basestring):
             self.assertIsInstance(key, str)
Example #27
File: decals.py  Project: willettk/decals
def tractor_cut(tractor,verbose=False):

    '''
    Dustin Lang, 5 Jun 2015
    
    I'd suggest cutting to brick_primary = 1, decam_anymask = 0, and decam_nobs >= 1 for array elements 1, 2, and 4 (decam_nobs is an array for bands ugrizY).
    '''
    
    brick_primary = tractor['brick_primary']
    anymask = [np.logical_not(np.bool(x[1]) | np.bool(x[2]) | np.bool(x[4])) for x in tractor['decam_anymask']]
    nobs = [np.bool(x[1]) & np.bool(x[2]) & np.bool(x[4]) for x in tractor['decam_nobs']]
    
    '''
    I'm not sure what a reasonable minimum size is, but I'd guess you're right with the 5"-10" range.
    '''
    
    sizelim_arcsec = 10.
    
    sizearr = [x['shapeExp_r'] if x['fracDev'] < 0.5 else x['shapeDev_r'] for x in tractor]
    #sizearr = [np.max((x['shapeExp_r'],x['shapeDev_r'])) for x in tractor]
    sizemask = np.array(sizearr) > sizelim_arcsec
    
    # Require a flux measurement in all bands?
    
    flux = [True if np.min(x[[1,2,4]]) > 0 else False for x in tractor['decam_flux']]
    
    # Type of object
    
    not_star = [True if x != 'PSF' else False for x in tractor['type']]
    
    # Combine all cuts
    
    cuts = brick_primary & np.array(anymask) & np.array(nobs) & sizemask & np.array(flux) & np.array(not_star)
    
    if verbose:
        print "\nbrick_primary: %i galaxies" % sum(brick_primary)
        print "anymask: %i galaxies" % sum(np.array(anymask))
        print "nobs: %i galaxies" % sum(np.array(nobs))
        print "sizemask: %i galaxies" % sum(brick_primary)
        print "flux: %i galaxies" % sum(np.array(flux))
        print "notstar: %i galaxies" % sum(np.array(not_star))
    
    print '\n%i objects in file; %i meet all cuts on image quality and size' % (len(tractor),cuts.sum())
    
    # What's the average magnitude of these galaxies?
    
    rflux = [x[2] for x in tractor[cuts]['decam_flux']]
    rmag = 22.5 - 2.5*np.log10(np.array(rflux))
    print "\nAverage r-band magnitude is %.1f +- %.1f" % (np.mean(rmag),np.std(rmag))
    
    cutdata = tractor[cuts]
    
    return cutdata
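The magnitude conversion at the end assumes DECaLS catalog fluxes in nanomaggies, where m = 22.5 - 2.5*log10(flux); for example:

import numpy as np

rflux = np.array([1.0, 10.0, 100.0])    # fluxes in nanomaggies
print(22.5 - 2.5 * np.log10(rflux))     # [22.5  20.   17.5]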
Example #28
def readTest(testFile,word2id,tag2id,id2word,id2tag):

	utterances = list()
	tags = list()
	starts = list()
	startid = list()

	utt_count = 0
	temp_startid = 0
	for line in open(testFile, 'r'):
		d=line.split('\t')
		utt = d[0].strip()
		t = d[1].strip()
		if len(d) > 2:
			start = np.bool(int(d[2].strip()))
			starts.append(start)
			if start:
				temp_startid = utt_count
			startid.append(temp_startid)
	#print 'utt: %s, tags: %s' % (utt,t) 

		temp_utt = list()
		temp_tags = list()
		mywords = utt.split()
		mytags = t.split()
		# now add the words and tags to word and tag dictionaries
		# also save the word and tag sequence in training data sets
		for i in xrange(len(mywords)):
			if mywords[i] not in word2id:
				temp_utt.append(1) #i.e. append unknown word
			else:
				temp_utt.append(word2id[mywords[i]])
			if mytags[i] not in tag2id:
				temp_tags.append(1)
			else:
				temp_tags.append(tag2id[mytags[i]])
		utt_count += 1
		utterances.append(temp_utt)
		tags.append(temp_tags)
		wordVocabSize = len(word2id)

	data = {'start': starts, 'startid': startid, 'utterances': utterances, 'tags': tags, 'uttCount': utt_count, 'wordVocabSize' : wordVocabSize, 'id2word':id2word, 'id2tag': id2tag}
	return data
Example #29
 def test_numpy_scalar_conversion_dtypes(self):
     self.assertEqual(nd.dtype_of(nd.array(np.bool_(True))), ndt.bool)
     self.assertEqual(nd.dtype_of(nd.array(np.bool(True))), ndt.bool)
     self.assertEqual(nd.dtype_of(nd.array(np.int8(100))), ndt.int8)
     self.assertEqual(nd.dtype_of(nd.array(np.int16(100))), ndt.int16)
     self.assertEqual(nd.dtype_of(nd.array(np.int32(100))), ndt.int32)
     self.assertEqual(nd.dtype_of(nd.array(np.int64(100))), ndt.int64)
     self.assertEqual(nd.dtype_of(nd.array(np.uint8(100))), ndt.uint8)
     self.assertEqual(nd.dtype_of(nd.array(np.uint16(100))), ndt.uint16)
     self.assertEqual(nd.dtype_of(nd.array(np.uint32(100))), ndt.uint32)
     self.assertEqual(nd.dtype_of(nd.array(np.uint64(100))), ndt.uint64)
     self.assertEqual(nd.dtype_of(nd.array(np.float32(100.))), ndt.float32)
     self.assertEqual(nd.dtype_of(nd.array(np.float64(100.))), ndt.float64)
     self.assertEqual(nd.dtype_of(nd.array(np.complex64(100j))),
                      ndt.complex_float32)
     self.assertEqual(nd.dtype_of(nd.array(np.complex128(100j))),
                      ndt.complex_float64)
     if np.__version__ >= '1.7':
         self.assertEqual(nd.dtype_of(nd.array(np.datetime64('2000-12-13'))), ndt.date)
Example #30
def bgo_event(buf, out):
    """Parse an event packet from the BGO shields (SystemID 0xB6 and TmType 0x80 or 0x82)

    Notes
    -----
    TmType 0x80 can have up to 64 events, while TmType 0x82 can have up to 102 events
    """
    if not (out['systemid'] == 0xB6 and (out['tmtype'] == 0x80 or out['tmtype'] == 0x82)):
        raise ValueError

    event_time = np.zeros(102, np.int64)
    clock_source = np.zeros(102, np.uint8)
    clock_synced = np.zeros(102, np.bool)
    channel = np.zeros(102, np.uint8)
    level = np.zeros(102, np.uint8)

    bytes_per_event = 5 if out['tmtype'] == 0x82 else 8

    index = INDEX_PAYLOAD
    loc = 0

    while index < INDEX_PAYLOAD + out['length']:
        time_lsb = buf[index] | buf[index + 1] << 8 | buf[index + 2] << 16 | (buf[index + 3] & 0x0F) << 24
        time_diff = (out['systime'] & ((1 << 28) - 1)) - time_lsb
        if time_diff < 0:
            time_diff += 1 << 28
        event_time[loc] = (out['systime'] - time_diff) * 10 + (buf[index + 3] >> 4) * 2

        clock_source[loc] = buf[index + 4] >> 7 & 1
        clock_synced[loc] = np.bool(buf[index + 4] >> 6 & 1)
        channel[loc] = buf[index + 4] >> 2 & 0x0F
        level[loc] = buf[index + 4] & 0x03

        index += bytes_per_event
        loc += 1

    out['event_time'] = event_time[:loc]
    out['clock_source'] = clock_source[:loc]
    out['clock_synced'] = clock_synced[:loc]
    out['channel'] = channel[:loc]
    out['level'] = level[:loc]

    return out
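A standalone sketch of the little-endian bit assembly used above for the 28-bit event time; the byte values are made up:

buf = [0x78, 0x56, 0x34, 0x12]   # four hypothetical payload bytes
time_lsb = buf[0] | buf[1] << 8 | buf[2] << 16 | (buf[3] & 0x0F) << 24
print(hex(time_lsb))             # 0x2345678 (the top nibble of buf[3] is used separately above)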
Example #31
def buffer(geometry,
           radius,
           quadsegs=8,
           cap_style="round",
           join_style="round",
           mitre_limit=5.0,
           single_sided=False,
           **kwargs):
    """
    Computes the buffer of a geometry for positive and negative buffer radius.

    The buffer of a geometry is defined as the Minkowski sum (or difference,
    for negative width) of the geometry with a circle with radius equal to the
    absolute value of the buffer radius.

    The buffer operation always returns a polygonal result. The negative
    or zero-distance buffer of lines and points is always empty.

    Parameters
    ----------
    geometry : Geometry or array_like
    radius : float or array_like
        Specifies the circle radius in the Minkowski sum (or difference).
    quadsegs : int
        Specifies the number of linear segments in a quarter circle in the
        approximation of circular arcs.
    cap_style : {'round', 'square', 'flat'}
        Specifies the shape of buffered line endings. 'round' results in
        circular line endings (see ``quadsegs``). Both 'square' and 'flat'
        result in rectangular line endings, only 'flat' will end at the
        original vertex, while 'square' involves adding the buffer width.
    join_style : {'round', 'bevel', 'mitre'}
        Specifies the shape of buffered line midpoints. 'round' results in
        rounded shapes. 'bevel' results in a beveled edge that touches the
        original vertex. 'mitre' results in a single vertex that is beveled
        depending on the ``mitre_limit`` parameter.
    mitre_limit : float
        Crops 'mitre'-style joins if the point is displaced from the
        buffered vertex by more than this limit.
    single_sided : bool
        Only buffer at one side of the geometry.

    Examples
    --------
    >>> buffer(Geometry("POINT (10 10)"), 2, quadsegs=1)
    <pygeos.Geometry POLYGON ((12 10, 10 8, 8 10, 10 12, 12 10))>
    >>> buffer(Geometry("POINT (10 10)"), 2, quadsegs=2)
    <pygeos.Geometry POLYGON ((12 10, 11.4 8.59, 10 8, 8.59 8.59, 8 10, 8.59 11.4, 10 12, 11.4 11.4, 12 10))>
    >>> buffer(Geometry("POINT (10 10)"), -2, quadsegs=1)
    <pygeos.Geometry POLYGON EMPTY>
    >>> line = Geometry("LINESTRING (10 10, 20 10)")
    >>> buffer(line, 2, cap_style="square")
    <pygeos.Geometry POLYGON ((20 12, 22 12, 22 8, 10 8, 8 8, 8 12, 20 12))>
    >>> buffer(line, 2, cap_style="flat")
    <pygeos.Geometry POLYGON ((20 12, 20 8, 10 8, 10 12, 20 12))>
    >>> buffer(line, 2, single_sided=True, cap_style="flat")
    <pygeos.Geometry POLYGON ((20 10, 10 10, 10 12, 20 12, 20 10))>
    >>> line2 = Geometry("LINESTRING (10 10, 20 10, 20 20)")
    >>> buffer(line2, 2, cap_style="flat", join_style="bevel")
    <pygeos.Geometry POLYGON ((18 12, 18 20, 22 20, 22 10, 20 8, 10 8, 10 12, 18 12))>
    >>> buffer(line2, 2, cap_style="flat", join_style="mitre")
    <pygeos.Geometry POLYGON ((18 12, 18 20, 22 20, 22 8, 10 8, 10 12, 18 12))>
    >>> buffer(line2, 2, cap_style="flat", join_style="mitre", mitre_limit=1)
    <pygeos.Geometry POLYGON ((18 12, 18 20, 22 20, 21.8 9, 21 8.17, 10 8, 10 12, 18 12))>
    >>> square = Geometry("POLYGON((0 0, 10 0, 10 10, 0 10, 0 0))")
    >>> buffer(square, 2, join_style="mitre")
    <pygeos.Geometry POLYGON ((-2 -2, -2 12, 12 12, 12 -2, -2 -2))>
    >>> buffer(square, -2, join_style="mitre")
    <pygeos.Geometry POLYGON ((2 2, 2 8, 8 8, 8 2, 2 2))>
    >>> buffer(square, -5, join_style="mitre")
    <pygeos.Geometry POLYGON EMPTY>
    >>> buffer(line, float("nan")) is None
    True
    """
    if isinstance(cap_style, str):
        cap_style = BufferCapStyles[cap_style.upper()].value
    if isinstance(join_style, str):
        join_style = BufferJoinStyles[join_style.upper()].value
    if not np.isscalar(quadsegs):
        raise TypeError("quadsegs only accepts scalar values")
    if not np.isscalar(cap_style):
        raise TypeError("cap_style only accepts scalar values")
    if not np.isscalar(join_style):
        raise TypeError("join_style only accepts scalar values")
    if not np.isscalar(mitre_limit):
        raise TypeError("mitre_limit only accepts scalar values")
    if not np.isscalar(single_sided):
        raise TypeError("single_sided only accepts scalar values")
    return lib.buffer(geometry, radius, np.intc(quadsegs), np.intc(cap_style),
                      np.intc(join_style), mitre_limit, np.bool(single_sided),
                      **kwargs)
Example #32
    def test_numpy_scalars_ok(self, all_logical_operators):
        a = pd.array([True, False, None], dtype="boolean")
        op = getattr(a, all_logical_operators)

        tm.assert_extension_array_equal(op(True), op(np.bool(True)))
        tm.assert_extension_array_equal(op(False), op(np.bool(False)))
Example #33
def test_sequence_numpy_boolean(seq):
    expected = [np.bool(True), None, np.bool(False), None]
    arr = pa.array(seq(expected))
    assert arr.type == pa.bool_()
    assert arr.to_pylist() == [True, None, False, None]
Example #34
def gravitational_clustering_numG(afinidades,
                                  delta,
                                  epsilon,
                                  p,
                                  num_clusters,
                                  h,
                                  unitary,
                                  masses=None,
                                  cl_test=False,
                                  normalization=False,
                                  connect=False,
                                  correccion_dim=False,
                                  conexiones=None,
                                  adaptativo=True,
                                  penalty=0,
                                  verbose=False,
                                  sparse_flag=False,
                                  diccionario=None,
                                  diccionario_reverso=None):
    '''
    SUMMARY:
        Applies the modified gravitational clustering algorithm to dataset
        "data" in order to get a set of clusters.

    INPUT:
        data = double[num_examples, num_vars]
            data(i, j) -> j-th attribute of i-th example
        delta = double -> value of parameter delta (delta > 0)
        epsilon = double -> value of parameter epsilon (epsilon > 0)
            Recommended value for epsilon = 2*delta
        num_clusters = int -> number of expected clusters (num_clusters > 0)
        h = int -> overlap model:
            h = 1 -> unit markovian model
            h = 2 -> O(1, 1/m) = (1/m)^p
            h = 3 -> O(1, 1/m) = sqrt(1/m) / (sqrt(1/m) + max(1 - 1/m, 0))
            h = 4 -> O(1, 1/m) = sin(pi/2 * (1/m)^p)

    OUTPUT:
        clustering = int[num_examples]
            clustering(i) -> cluster the i-th example belongs to
        real_time = double -> real time the algorithm has been in execution (debug)
        simulated_time = double -> simulated time the clustering has lasted
    '''
    global linkeos
    _sparse_flag = sparse_flag

    num_particles = afinidades.shape[0]
    vivos = np.ones(num_particles, dtype=bool)

    if conexiones is None:
        conexiones = afinidades.copy() != 0
    if masses is None:
        masses = np.ones(num_particles, np.float64)

    masses = masses.reshape((len(masses), ))
    masas_originales = masses
    masses = masses / np.sum(masses)

    positions = afinidades.copy()

    clustering = np.arange(0, num_particles)
    t = 0
    num_iter = 1

    afinidades_originales = afinidades.copy()
    delta_original = delta
    [_, clustering, positions, afinidades, masses, conexiones, num_particles] = \
        check_collisions(positions, clustering, masses, epsilon, conexiones, afinidades_originales=afinidades_originales,
                         afinidades = afinidades, connect = connect, verbose=verbose, correccion_dim=correccion_dim,
                         diccionario_reverso=diccionario_reverso, diccionario=diccionario, vivos=vivos, masas_originales=masas_originales,
                         linkeos=linkeos)

    best_conf = clustering
    best_conf_count = 0
    actual_conf_count = 0

    cmp_max = len(np.unique(clustering))
    primera = adaptativo
    num_particles = len(masses)

    while num_particles > 1:
        if afinidades.shape[0] > 1 and (
                np.sum(afinidades) - np.sum(np.diag(afinidades)) <= 10E-5):
            #print('Help me to name it!')
            return best_conf

        [velocities, dt,
         distancia_influencia] = compute_G(positions,
                                           masses,
                                           delta,
                                           p,
                                           np.int64(h),
                                           afinidades,
                                           primera=np.bool(primera),
                                           simmetry=np.bool(False),
                                           penalty=np.int64(penalty))
        if primera:
            primera = False
            delta = delta_original

        t = t + dt
        positions = positions + velocities
        actual_conf_count = actual_conf_count + dt


        [shock, clustering_new, positions, afinidades, masses, conexiones, num_particles] = \
            check_collisions(positions, clustering, masses, epsilon, afinidades, connect, afinidades_originales=afinidades_originales,
                             afinidades = afinidades, correccion_dim=correccion_dim, verbose=verbose,
                             diccionario_reverso=diccionario_reverso, diccionario=diccionario, vivos = vivos,
                             masas_originales=masas_originales, target = num_clusters, linkeos=linkeos)

        if not cl_test and shock:
            if num_clusters > 1 and num_particles <= num_clusters:
                return clustering_new
            elif (not unitary) and num_clusters <= 1 and (
                    actual_conf_count * np.log2(num_particles) >=
                    best_conf_count):
                best_conf = clustering
                best_conf_count = actual_conf_count * np.log2(num_particles)

                actual_conf_count = 0
            elif unitary and num_clusters <= 1 and (actual_conf_count >=
                                                    best_conf_count):
                best_conf = clustering
                best_conf_count = actual_conf_count

                actual_conf_count = 0

            clustering = clustering_new
            num_iter = num_iter + 1
            num_particles = len(masses)

        else:
            #current_score = np.sum
            pass

    return best_conf
Example #35
def getBoolValue(x) -> np.bool_:
    return np.bool(x)
Example #36
 def test_to_value_with_np_bool(self):
     value = value_impl.to_value(np.bool(1.0), tf.bool,
                                 context_stack_impl.context_stack)
     self.assertIsInstance(value, value_base.Value)
     self.assertEqual(str(value.type_signature), 'bool')
Example #37
File: glove.py  Project: Xianan05/BiLstm
# reserving index 1 for unknown word and tokens
word_vocab_index = 2
tag_vocab_index = 2
word2id = {'<pad>': 0, '<unk>': 1}
tag2id = {'<pad>': 0, '<unk>': 1}
id2word = ['<pad>', '<unk>']
id2tag = ['<pad>', '<unk>']

utt_count = 0
temp_startid = 0
for line in open(dataFile, 'r'):
	d=line.split('\t')
	utt = d[0].strip()
	t = d[1].strip()
	if len(d) > 2:
		start = np.bool(int(d[2].strip()))
		starts.append(start)
		if start:
			temp_startid = utt_count
		startid.append(temp_startid)
	#print 'utt: %s, tags: %s' % (utt,t)
	temp_utt = list()
	temp_tags = list()
	mywords = utt.split()
	mytags = t.split()
	if len(mywords) != len(mytags):
		print (mywords)
		print (mytags)
	# now add the words and tags to word and tag dictionaries
	# also save the word and tag sequence in training data sets
	for i in range(len(mywords)):
Example #38
 def test_with_np_bool(self):
     self.assertEqual(str(type_conversions.infer_type(np.bool(True))),
                      'bool')
Example #39
    def _build_communication(self, job_name, task_index):
        """Build the subgraph for communication between actors, memories, and learners
        """
        if job_name in ["actor", "memory"]:
            # data flow: actor --> memory
            dtypes, shapes, self.in_queue_phs = self._get_in_queue_meta()
            self._in_queues = list()
            self._en_in_queues = list()
            self._de_in_queues = list()
            self._close_in_queues = list()
            for i in range(self.distributed_handler.num_memory_hosts):
                with tf.device("/job:memory/task:{}".format(i)):
                    in_q = tf.FIFOQueue(8,
                                        dtypes,
                                        shapes,
                                        shared_name="inqueue{}".format(i))
                    self._in_queues.append(in_q)
                    en_q = in_q.enqueue(self.in_queue_phs)
                    self._en_in_queues.append(en_q)
                    de_q = in_q.dequeue()
                    self._de_in_queues.append(de_q)
                    self._close_in_queues.append(
                        in_q.close(cancel_pending_enqueues=True))
            self._in_queue_size = self._in_queues[
                self.distributed_handler.task_index %
                len(self._in_queues)].size()

        # data flow: memory --> learner
        dtypes, shapes, self.out_queue_phs = self._get_out_queue_meta()
        self._out_queues = list()
        self._en_out_queues = list()
        self._de_out_queues = list()
        self._close_out_queues = list()
        if job_name == "memory":
            for i in range(self.distributed_handler.num_learner_hosts):
                with tf.device("/job:learner/task:{}".format(i)):
                    out_q = tf.FIFOQueue(8,
                                         dtypes,
                                         shapes,
                                         shared_name="outqueue{}".format(i))
                    self._out_queues.append(out_q)
                    en_q = out_q.enqueue(self.out_queue_phs)
                    self._en_out_queues.append(en_q)
                    de_q = out_q.dequeue()
                    self._de_out_queues.append(de_q)
                    self._close_out_queues.append(
                        out_q.close(cancel_pending_enqueues=True))
            self._out_queue_size = self._out_queues[
                self.distributed_handler.task_index %
                len(self._out_queues)].size()

        if job_name == "learner":
            with tf.device("/job:learner/task:{}".format(
                    self.distributed_handler.task_index)):
                out_q = tf.FIFOQueue(8,
                                     dtypes,
                                     shapes,
                                     shared_name="outqueue{}".format(
                                         self.distributed_handler.task_index))
                self._out_queues.append(out_q)
                en_q = out_q.enqueue(self.out_queue_phs)
                self._en_out_queues.append(en_q)
                de_q = out_q.dequeue()
                self._de_out_queues.append(de_q)
                self._close_out_queues.append(
                    out_q.close(cancel_pending_enqueues=True))

        # create an op for actors to obtain the latest vars
        sync_var_ops = list()
        for des, src in zip(self.behavior_model.actor_sync_variables,
                            self.model.actor_sync_variables):
            sync_var_ops.append(tf.assign(des, src))
        self._sync_var_op = tf.group(*sync_var_ops)

        # create some vars and queues for monitoring the training courses
        self._num_sampled_timesteps = tf.get_variable("num_sampled_timesteps",
                                                      dtype=tf.int64,
                                                      initializer=np.int64(0))

        self._learner_done_flags = tf.get_variable(
            "learner_done_flags",
            dtype=tf.bool,
            initializer=np.asarray(self.distributed_handler.num_learner_hosts *
                                   [False],
                                   dtype=np.bool))
        self._actor_done_flags = tf.get_variable(
            "actor_done_flags",
            dtype=tf.bool,
            initializer=np.asarray(self.distributed_handler.num_actor_hosts *
                                   [False],
                                   dtype=np.bool))
        self._should_stop = tf.logical_and(
            tf.reduce_all(self._learner_done_flags),
            tf.reduce_all(self._actor_done_flags))
        if self.distributed_handler.job_name == "learner":
            self._set_stop_flag = tf.assign(
                self._learner_done_flags[self.distributed_handler.task_index],
                np.bool(1),
                use_locking=True)
        if self.distributed_handler.job_name == "actor":
            self._set_stop_flag = tf.assign(
                self._actor_done_flags[self.distributed_handler.task_index],
                np.bool(1),
                use_locking=True)

        self._ready_to_exit = tf.get_variable("global_ready_to_exit",
                                              dtype=tf.bool,
                                              initializer=np.bool(0))
        self._set_ready_to_exit = tf.assign(self._ready_to_exit,
                                            np.bool(1),
                                            use_locking=True)

        self._update_num_sampled_timesteps = tf.assign_add(
            self._num_sampled_timesteps, np.int64(10000), use_locking=True)
Example #40
def experiment(report_every_n=100):
    """Run training operations, then validate.
  
  Args:
    report_every_n: Print loss every n training operations. 0 for no printing.
    
  Returns:
    Validation top-1 accuracy and a numpy array of training losses
  """

    #Placeholders to feed hyperparameters into graph
    learning_rate_ph = tf.placeholder(tf.float32, name="learning_rate")
    beta1_ph = tf.placeholder(tf.float32, shape=(), name="beta1")
    decay_ph = tf.placeholder(tf.float32, shape=(), name="decay")
    gen_scale_ph = tf.placeholder(tf.float32, shape=(), name="gen_scale")
    is_training_ph = tf.placeholder(tf.bool, name="is_training")
    mode_ph = tf.placeholder(tf.int32, name="mode")

    data_dir = "//Desktop-sa1evjv/h/wavefunctions/"
    batch_size = 1  #12

    def load_data_subset(subset):
        return load_data(dir=data_dir, subset=subset, batch_size=batch_size)

    inputs, target_outputs = tf.case({
        tf.equal(mode_ph, 0):
        lambda: load_data_subset("train"),
        tf.equal(mode_ph, 1):
        lambda: load_data_subset("val"),
        tf.equal(mode_ph, 2):
        lambda: load_data_subset("test")
    })

    #Describe learning policy
    start_iter = 49_974  #43_887
    train_iters = 800_000
    val_iters = 1_000

    learning_rate = 0.0001
    beta1 = 0.9

    #Configure operations
    train_op, loss, output = configure(inputs=inputs,
                                       batch_size=batch_size,
                                       target_outputs=target_outputs,
                                       is_training=is_training_ph,
                                       learning_rate=learning_rate_ph,
                                       beta1=beta1_ph,
                                       is_depthwise_sep=False,
                                       decay=decay_ph,
                                       gen_scale=gen_scale_ph)

    #Tensors to dump as visual output
    first_image = inputs[0]
    first_target_output = target_outputs[0]
    first_output = output[0]

    #Session configuration
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True  #Only use required GPU memory
    config.gpu_options.force_gpu_compatible = True

    model_dir = f"//flexo.ads.warwick.ac.uk/Shared41/Microscopy/Jeffrey-Ede/models/wavefunctions/{EXPER_NUM}/"

    saver = tf.train.Saver(max_to_keep=1)
    noteable_saver = tf.train.Saver(max_to_keep=1)

    log_filepath = model_dir + "log.txt"
    save_period = 1
    save_period *= 3600
    with tf.Session(config=config) as sess, open(log_filepath,
                                                 "a") as log_file:

        #Initialize network parameters
        feed_dict = {
            is_training_ph: np.bool(True),
            learning_rate_ph: np.float32(learning_rate),
            beta1_ph: np.float32(beta1),
            mode_ph: np.int32(0),
            decay_ph: np.float32(0.),
            gen_scale_ph: np.float32(0.)
        }

        if start_iter:
            saver.restore(sess,
                          tf.train.latest_checkpoint(model_dir + "model/"))
        else:
            sess.run(tf.global_variables_initializer(), feed_dict=feed_dict)

        #Finalize graph to prevent additional nodes from being added
        #sess.graph.finalize()

        #Training
        time0 = time.time()
        for iter in range(start_iter, train_iters):

            is_halfway = iter >= train_iters // 2

            decay = 0.997 if iter else 0.
            lr = learning_rate  #* 0.5**( max( iter//(train_iters//7), 3) )
            is_training = iter < 10_000  #not is_halfway
            beta1 = 0.5 if iter < 200_000 else 0.9

            gen_scale = 1.  #0 if iter < 50 else 1.

            #Feed values into training operations
            feed_dict = {
                is_training_ph: np.bool(is_training),
                learning_rate_ph: np.float32(lr),
                beta1_ph: np.float32(beta1),
                mode_ph: np.int32(0),
                decay_ph: np.float32(decay),
                gen_scale_ph: np.float32(gen_scale)
            }

            if iter in [0, 100, 500] or not iter % 25_000 or (
                    0 <= iter < 10_000
                    and not iter % 1000) or iter == start_iter:
                _, step_loss, [
                    step_image, step_target_output, step_output
                ] = sess.run([
                    train_op, loss,
                    [first_image, first_target_output, first_output]
                ],
                             feed_dict=feed_dict)

                save_input_loc = model_dir + "input-" + str(iter) + ".tif"
                save_truth_loc = model_dir + "truth-" + str(iter) + ".tif"
                save_output_loc = model_dir + "output-" + str(iter) + ".tif"
                step_image = step_image.reshape(cropsize, cropsize)
                Image.fromarray(step_image.astype(
                    np.float32)).save(save_input_loc)
                Image.fromarray(
                    (step_target_output[..., 0] / step_image).astype(
                        np.float32)).save(save_truth_loc)
                Image.fromarray((step_output[..., 0] / step_image).astype(
                    np.float32)).save(save_output_loc)
            else:
                for step_train_op in [
                        train_op[1], train_op[1], train_op[1], train_op[1],
                        train_op
                ]:
                    #step_train_op = train_op[1] if iter < 500 else train_op
                    _, step_loss = sess.run([step_train_op, loss],
                                            feed_dict=feed_dict)

            output = f"Iter: {iter}"
            for k in step_loss:
                output += f", {k}: {step_loss[k]}"

            if "nan" in output:
                saver.restore(sess,
                              tf.train.latest_checkpoint(model_dir + "model/"))
                #quit()

            if report_every_n:
                if not iter % report_every_n:
                    print(output)

            log_file.write(output)

            if iter in [train_iters // 2 - 1, train_iters - 1]:
                noteable_saver.save(sess,
                                    save_path=model_dir +
                                    "noteable_ckpt/model",
                                    global_step=iter)
                time0 = time.time()
                start_iter = iter
            elif time.time() >= time0 + save_period:
                saver.save(sess,
                           save_path=model_dir + "model/model",
                           global_step=iter)
                time0 = time.time()

        #Validation - super important!
        val_loss = 0.
        for iter in range(val_iters):

            feed_dict = {
                is_training_ph: np.bool(False),
                mode_ph: np.int32(1),
                decay_ph: np.float32(decay)
            }

            step_loss = sess.run(loss, feed_dict=feed_dict)
            val_loss += step_loss

        val_loss /= val_iters
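For reference, the mode_ph/tf.case pattern used above to switch a single graph between the train, validation and test input pipelines can be reduced to a minimal sketch (assuming TensorFlow 1.x; the constants stand in for the real data loaders):

import numpy as np
import tensorflow as tf

mode_ph = tf.placeholder(tf.int32, shape=(), name="mode")

def fake_loader(value):
    # Stand-in for load_data_subset(...); tf.case expects a callable per branch.
    return lambda: tf.constant(value, dtype=tf.float32)

batch = tf.case({
    tf.equal(mode_ph, 0): fake_loader(0.0),  # train
    tf.equal(mode_ph, 1): fake_loader(1.0),  # validation
    tf.equal(mode_ph, 2): fake_loader(2.0),  # test
})

with tf.Session() as sess:
    print(sess.run(batch, feed_dict={mode_ph: np.int32(1)}))  # prints 1.0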
예제 #41
0
    def read_object(self, name, lh5_file, start_row=0, n_rows=sys.maxsize, idx=None, field_mask=None, obj_buf=None, obj_buf_start=0, verbosity=0):
        """ Read LH5 object data from a file

        Parameters
        ----------
        name : str
            Name of the lh5 object to be read (including its group path)
        lh5_file : str or h5py File object, or a list of either
            The file(s) containing the object to be read out. If a list of
            files, array-like object data will be concatenated into the output
            object
        start_row : int (optional)
            Starting entry for the object read (for array-like objects). For a
            list of files, only applies to the first file.
        n_rows : int (optional)
            The maximum number of rows to read (for array-like objects). The
            actual number of rows read will be returned as one of the return
            values (see below)
        idx : index array or index tuple, or a list of index arrays/tuples (optional)
            For numpy-style "fancy indexing" for the read. Used to read out
            rows that pass some selection criteria. Only selection along the 1st
            axis is supported, so tuple arguments must be one-tuples.  If n_rows
            is not false, idx will be truncated to n_rows before reading. To use
            with a list of files, can pass in a list of idx's (one for each
            file) or use a long contiguous list (e.g. built from a previous
            identical read). If used in conjunction with start_row and n_rows,
            will be sliced to obey those constraints, where n_rows is
            interpreted as the (max) number of -selected- values (in idx) to be
            read out.
        field_mask : dict or defaultdict { str : bool } (optional)
            For tables and structs, determines which fields get written out.
            Only applies to immediate fields of the requested objects. If a dict
            is used, a defaultdict will be made with the default set to the
            opposite of the first element in the dict. This way if one specifies
            a few fields at "false", all but those fields will be read out,
            while if one specifies just a few fields as "true", only those
            fields will be read out.
        obj_buf : lh5 object (optional)
            Read directly into memory provided in obj_buf. Note: the buffer will
            be expanded to accommodate the data requested. To maintain the
            buffer length, send in n_rows = len(obj_buf)
        obj_buf_start : int (optional)
            Start location in obj_buf for read. For concatenating data to
            array-like objects
        verbosity : bool (optional)
            Turn on verbosity for debugging

        Returns
        -------
        (object, n_rows_read) : tuple
            object is the read-out object
            n_rows_read is the number of rows successfully read out. Essential
            for arrays when the amount of data is smaller than the object
            buffer.  For scalars and structs n_rows_read will be "1". For tables
            it is redundant with table.loc
        """
        #TODO: Ian's idea: add an iterator so one can do something like
        #      for data in lh5iterator(file, chunksize, nentries, ...):
        #          proc.execute()

        # Handle list-of-files recursively
        if isinstance(lh5_file, list):
            n_rows_read = 0
            for i, h5f in enumerate(lh5_file):
                if isinstance(idx, list) and len(idx) > 0 and not np.isscalar(idx[0]):
                    # a list of lists: must be one per file
                    idx_i = idx[i]
                elif idx is not None: 
                    # make idx a proper tuple if it's not one already
                    if not (isinstance(idx, tuple) and len(idx) == 1): idx = (idx,)
                    # idx is a long continuous array
                    n_rows_i = self.read_n_rows(name, h5f)
                    # find the length of the subset of idx that contains indices
                    # that are less than n_rows_i
                    n_rows_to_read_i = bisect_left(idx[0], n_rows_i)
                    # now split idx into idx_i and the remainder
                    idx_i = (idx[0][:n_rows_to_read_i],)
                    idx = (idx[0][n_rows_to_read_i:]-n_rows_i,)
                else: idx_i = None
                n_rows_i = n_rows-n_rows_read
                obj_buf, n_rows_read_i = self.read_object(name,
                                                          lh5_file[i],
                                                          start_row=start_row,
                                                          n_rows=n_rows_i,
                                                          idx=idx_i,
                                                          field_mask=field_mask,
                                                          obj_buf=obj_buf,
                                                          obj_buf_start=obj_buf_start,
                                                          verbosity=verbosity)
                n_rows_read += n_rows_read_i
                if n_rows_read >= n_rows or obj_buf is None:
                    return obj_buf, n_rows_read
                start_row = 0
                obj_buf_start += n_rows_read_i
            return obj_buf, n_rows_read

        # start read from single file. fail if the object is not found
        if verbosity > 0: print("reading", name, "from", lh5_file)

        # get the file from the store
        h5f = self.gimme_file(lh5_file, 'r')
        if name not in h5f:
            print('Store:', name, "not in", lh5_file)
            return None, 0

        # make idx a proper tuple if it's not one already
        if not (isinstance(idx, tuple) and len(idx) == 1): 
            if idx is not None: idx = (idx,)

        # get the object's datatype
        if 'datatype' not in h5f[name].attrs:
            print('Store:', name, 'in file', lh5_file, 'is missing the datatype attribute')
            return None, 0
        datatype = h5f[name].attrs['datatype']
        datatype, shape, elements = parse_datatype(datatype)

        # Scalar
        # scalars are dim-0 datasets
        if datatype == 'scalar': 
            value = h5f[name][()]
            if elements == 'bool': value = np.bool(value)
            if obj_buf is not None:
                obj_buf.value = value
                obj_buf.attrs.update(h5f[name].attrs)
                return obj_buf, 1
            else: return Scalar(value=value, attrs=h5f[name].attrs), 1


        # Struct
        # recursively build a struct, return as a dictionary
        if datatype == 'struct':

            # ignore obj_buf.
            # TODO: could append new fields or overwrite/concat to existing
            # fields. If implemented, get_buffer() above should probably also
            # (optionally?) prep buffers for each field
            if obj_buf is not None:
                print("obj_buf not implemented for structs.  Returning new object")

            # build field_mask
            if field_mask is None: field_mask = defaultdict(lambda : True)
            elif isinstance(field_mask, dict):
                default = True
                if len(field_mask) > 0:
                    default = not field_mask[list(field_mask.keys())[0]]
                field_mask = defaultdict(lambda : default, field_mask)
            elif not isinstance(field_mask, defaultdict):
                print('bad field_mask of type', type(field_mask).__name__)
                return None, 0

            # loop over fields and read
            obj_dict = {}
            for field in elements:
                if not field_mask[field]: continue
                # TODO: it's strange to pass start_row, n_rows, idx to struct
                # fields. If they all had shared indexing, they should be in a
                # table... Maybe should emit a warning? Or allow them to be
                # dicts keyed by field name?
                obj_dict[field], _ = self.read_object(name+'/'+field, 
                                                      h5f, 
                                                      start_row=start_row, 
                                                      n_rows=n_rows, 
                                                      idx=idx,
                                                      verbosity=verbosity)
            # modify datatype in attrs if a field_mask was used
            attrs = dict(h5f[name].attrs)
            if field_mask is not None:
                selected_fields = []
                for field in elements:
                    if field_mask[field]: selected_fields.append(field)
                attrs['datatype'] =  'struct' + '{' + ','.join(selected_fields) + '}'
            return Struct(obj_dict=obj_dict, attrs=attrs), 1

        # Below here is all array-like types. So trim idx if needed
        if idx is not None:
            # chop off indices < start_row
            i_first_valid = bisect_left(idx[0], start_row)
            idxa = idx[0][i_first_valid:]
            # don't readout more than n_rows indices
            idx = (idxa[:n_rows],) # works even if n_rows > len(idxa)

        # Table
        # read a table into a dataframe
        if datatype == 'table':
            col_dict = {}

            # build field_mask
            if field_mask is None: field_mask = defaultdict(lambda : True)
            elif isinstance(field_mask, dict):
                default = True
                if len(field_mask) > 0:
                    default = not (field_mask[list(field_mask.keys())[0]])
                field_mask = defaultdict(lambda : default, field_mask)
            elif not isinstance(field_mask, defaultdict):
                print('bad field_mask of type', type(field_mask).__name__)
                return None, 0

            # read out each of the fields
            rows_read = []
            for field in elements:
                if not field_mask[field] : continue
                fld_buf = None
                if obj_buf is not None:
                    if not isinstance(obj_buf, Table) or field not in obj_buf:
                        print("obj_buf for Table", name, 
                              "not formatted correctly. returning new object")
                        obj_buf = None
                    else: fld_buf = obj_buf[field]
                col_dict[field], n_rows_read = self.read_object(name+'/'+field, 
                                                                h5f, 
                                                                start_row=start_row, 
                                                                n_rows=n_rows,
                                                                idx=idx,
                                                                obj_buf=fld_buf,
                                                                obj_buf_start=obj_buf_start,
                                                                verbosity=verbosity)
                if obj_buf is not None and obj_buf_start+n_rows_read > len(obj_buf):
                    obj_buf.resize(obj_buf_start+n_rows_read, do_warn=(verbosity>0))
                rows_read.append(n_rows_read)
            # warn if all columns don't read in the same number of rows
            n_rows_read = rows_read[0]
            for n in rows_read[1:]:
                if n != n_rows_read:
                    print('table', name, 'got strange n_rows_read', n)
                    print(n_rows_read, 'was expected')

            # modify datatype in attrs if a field_mask was used
            attrs = dict(h5f[name].attrs)
            if field_mask is not None:
                selected_fields = []
                for field in elements:
                    if field_mask[field]: selected_fields.append(field)
                attrs['datatype'] =  'table' + '{' + ','.join(selected_fields) + '}'

            # fields have been read out, now return a table
            if obj_buf is None: 
                table = Table(col_dict=col_dict, attrs=attrs)
                # set (write) loc to end of tree
                table.loc = n_rows_read
                return table, n_rows_read
            else:
                # We have read all fields into the object buffer. Run
                # checks: All columns should be the same size. So update
                # table's size as necessary, warn if any mismatches are found
                obj_buf.resize(do_warn=True)
                # set (write) loc to end of tree
                obj_buf.loc = obj_buf_start+n_rows_read
                #check attributes
                if set(obj_buf.attrs.keys()) != set(attrs.keys()):
                    print('warning: attrs mismatch')
                    print('obj_buf.attrs:', obj_buf.attrs)
                    print('h5f['+name+'].attrs:', attrs)
                return obj_buf, n_rows_read

        # VectorOfVectors
        # read out vector of vectors of different size
        if elements.startswith('array'):
            if obj_buf is not None:
                if not isinstance(obj_buf, VectorOfVectors):
                    print("obj_buf for", name, "not a VectorOfVectors. returning new object")
                    obj_buf = None
            if idx is not None:
                print("warning: fancy indexed readout not implemented for vector of vectors, ignoring idx")
                # TODO: implement idx: first pull out all of cumulative length,
                # use it to build an idx for the data_array, then rebuild
                # cumulative length

            # read out cumulative_length
            cumulen_buf = None if obj_buf is None else obj_buf.cumulative_length
            cumulative_length, n_rows_read = self.read_object(name+'/cumulative_length', 
                                                              h5f, 
                                                              start_row=start_row, 
                                                              n_rows=n_rows,
                                                              obj_buf=cumulen_buf,
                                                              obj_buf_start=obj_buf_start,
                                                              verbosity=verbosity)
            # get a view of just what was read out for cleaner code below
            this_cumulen_nda = cumulative_length.nda[obj_buf_start:obj_buf_start+n_rows_read]

            # determine the start_row and n_rows for the flattened_data readout
            da_start = 0
            if start_row > 0 and n_rows_read > 0: 
                # need to read out the cumulen sample -before- the first sample
                # read above in order to get the starting row of the first
                # vector to read out in flattened_data
                da_start = h5f[name+'/cumulative_length'][start_row-1]

                # check limits for values that will be used subsequently
                if this_cumulen_nda[-1] < da_start:
                    print("warning: cumulative_length non-increasing between entries", 
                          start_row, "and", start_row+n_rows_read, "??")
                    print(this_cumulen_nda[-1], da_start, start_row, n_rows_read)

            # determine the number of rows for the flattened_data readout
            da_nrows = this_cumulen_nda[-1] if n_rows_read > 0 else 0

            # Now done with this_cumulen_nda, so we can clean it up to be ready
            # to match the in-memory version of flattened_data. Note: these
            # operations on the view change the original array because they are
            # numpy arrays, not lists.
            #
            # First we need to subtract off the in-file offset for the start of
            # read for flattened_data
            this_cumulen_nda -= da_start

            # Then, if we started with a partially-filled buffer, add the
            # appropriate offset for the start of the in-memory flattened
            # data for this read.
            da_buf_start = 0
            if obj_buf_start > 0:
                da_buf_start = cumulative_length.nda[obj_buf_start-1]
                this_cumulen_nda += da_buf_start

            # Now prepare the object buffer if necessary
            da_buf = None 
            if obj_buf is not None:
                da_buf = obj_buf.flattened_data
                # grow da_buf if necessary to hold the data
                dab_size = da_buf_start + da_nrows
                if len(da_buf) < dab_size: da_buf.resize(dab_size)

            # now read
            flattened_data, dummy_rows_read = self.read_object(name+'/flattened_data', 
                                                               h5f, 
                                                               start_row=da_start, 
                                                               n_rows=da_nrows,
                                                               idx=idx,
                                                               obj_buf=da_buf,
                                                               obj_buf_start=da_buf_start,
                                                               verbosity=verbosity)
            if obj_buf is not None: return obj_buf, n_rows_read
            return VectorOfVectors(flattened_data=flattened_data, 
                                   cumulative_length=cumulative_length, 
                                   attrs=h5f[name].attrs), n_rows_read


        # Array
        # FixedSizeArray
        # ArrayOfEqualSizedArrays
        # read out all arrays by slicing
        if 'array' in datatype:
            if obj_buf is not None:
                if not isinstance(obj_buf, Array):
                    print("obj_buf for", name, "not an Array. returning new object")
                    obj_buf = None

            # compute the number of rows to read
            # we culled idx above for start_row and n_rows, now we have to apply
            # the constraint of the length of the dataset
            ds_n_rows = h5f[name].shape[0]
            if idx is not None:
                if len(idx[0]) > 0 and idx[0][-1] >= ds_n_rows:
                    print("warning: idx indexed past the end of the array in the file. Culling...")
                    n_rows_to_read = bisect_left(idx[0], ds_n_rows)
                    idx = (idx[0][:n_rows_to_read],)
                if len(idx[0]) == 0: print("warning: idx empty after culling.")
                n_rows_to_read = len(idx[0])
            else: n_rows_to_read = ds_n_rows - start_row
            if n_rows_to_read > n_rows: n_rows_to_read = n_rows

            # prepare the selection for the read. Use idx if available
            if idx is not None: source_sel = idx
            else: source_sel = np.s_[start_row:start_row+n_rows_to_read]

            # Now read the array
            if obj_buf is not None and n_rows_to_read > 0:
                buf_size = obj_buf_start + n_rows_to_read
                if len(obj_buf) < buf_size: obj_buf.resize(buf_size)
                dest_sel = np.s_[obj_buf_start:buf_size]
                # NOTE: if your script fails on this line, it may be because you
                # have to apply this patch to h5py (or update h5py, if it's
                # fixed): https://github.com/h5py/h5py/issues/1792
                h5f[name].read_direct(obj_buf.nda, source_sel, dest_sel)
            else: 
                if n_rows == 0: 
                    tmp_shape = (0,) + h5f[name].shape[1:]
                    nda = np.empty(tmp_shape, h5f[name].dtype)
                else: nda = h5f[name][source_sel]

            # special handling for bools
            if elements == 'bool': nda = nda.astype(np.bool)

            # Finally, set attributes and return objects
            attrs=h5f[name].attrs
            if obj_buf is None:
                if datatype == 'array': 
                    return Array(nda=nda, attrs=attrs), n_rows_to_read
                if datatype == 'fixedsize_array': 
                    return FixedSizeArray(nda=nda, attrs=attrs), n_rows_to_read
                if datatype == 'array_of_equalsized_arrays': 
                    return ArrayOfEqualSizedArrays(nda=nda, 
                                                   dims=shape, 
                                                   attrs=attrs), n_rows_to_read
            else:
                if set(obj_buf.attrs.keys()) != set(attrs.keys()):
                    print('warning: attrs mismatch')
                    print('obj_buf.attrs:', obj_buf.attrs)
                    print('h5f['+name+'].attrs:', attrs)
                return obj_buf, n_rows_to_read


        print('Store: don\'t know how to read datatype', datatype)
        return None, 0
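The field_mask handling documented above (a plain dict becomes a defaultdict whose default is the opposite of the first value, so listing a few fields as False reads everything else, while listing a few as True reads only those) can be illustrated with a small standalone sketch; build_field_mask is a hypothetical helper, not part of the store:

from collections import defaultdict

def build_field_mask(field_mask):
    # Hypothetical helper mirroring the docstring rule.
    if field_mask is None:
        return defaultdict(lambda: True)
    if isinstance(field_mask, dict) and not isinstance(field_mask, defaultdict):
        default = True
        if len(field_mask) > 0:
            default = not list(field_mask.values())[0]
        return defaultdict(lambda: default, field_mask)
    return field_mask

mask = build_field_mask({'energy': True})
print(mask['energy'], mask['timestamp'])  # True False
mask = build_field_mask({'waveform': False})
print(mask['waveform'], mask['energy'])   # False True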
예제 #42
0
 def test_bool(self, bool_input):
     b = np.bool(bool_input)
     assert ujson.decode(ujson.encode(b)) == b
예제 #43
0
def flat_surface2grid_mask(surface, min_nsteps, max_deformation):
    '''Computes a mask and corresponding coordinates from a flat surface 
    
    Parameters
    ----------
    surface: Surface
        flat surface
    min_nsteps: int
        minimum number of pixels in x and y direction
    max_deformation: float
        maximum deformation to make a non-flat surface flat.
        The normals of each face must have a dot product with the average
        face normal that is not less than (1-max_deformation); otherwise
        an exception is raised. The rationale for this option is that
        certain surfaces may be almost flat, and projecting the vertices
        on a truly flat surface should be fine. On the other hand, surfaces
        that are definitely not flat (such as full cortical surface models)
        should cause an error to be raised when an attempt is made to flatten
        them
        
    Returns
    -------
    x: np.ndarray
        x coordinates of surface
    y: np.ndarray
        y coordinates of surface
    m: np.ndarray
        mask array of size PxQ, with min(P,Q)==min_nsteps.
        m[i,j]==True iff the position at (i,j) is 'inside' the flat surface
    xi: np.ndarray
        vector of length Q with interpolated x coordinates
    yi: np.ndarray
        vector of length P with interpolated y coordinates
    
    Notes
    -----
    The output of this function can be used with scipy.interpolate.griddata
    '''

    surface = surf.from_any(surface)
    x, y = flat_surface2xy(surface, max_deformation)
    xmin = np.min(x)

    xi, yi = unstructured_xy2grid_xy_vectors(x, y, min_nsteps)
    delta = xi[1] - xi[0]
    vi2xi = (x - xmin) / delta

    # compute paths of nodes on the border
    pths = surface.nodes_on_border_paths()

    # map x index to segments that cross the x coordinate
    # (a segment is a pair (i,j) where nodes i and j share a triangle
    #  and are on the border)
    xidx2segments = dict()

    for pth in pths:
        # make a tour across pairs (i,j)
        j = pth[-1]
        for i in pth:
            pq = vi2xi[i], vi2xi[j]
            p, q = min(pq), max(pq)
            # always go left (p) to right (q)
            for pqs in np.arange(np.ceil(p), np.ceil(q)):
                # take each point in between
                ipqs = int(pqs)

                # add to xidx2segments
                if ipqs not in xidx2segments:
                    xidx2segments[ipqs] = list()
                xidx2segments[ipqs].append((i, j))

            # take end point from last iteration as starting point
            # in next iteration
            j = i

    # space for the mask
    yxshape = len(yi), len(xi)
    msk = np.zeros(yxshape, dtype=np.bool_)

    # see which nodes are *inside* a surface
    # (there can be multiple surfaces)
    for ii, xpos in enumerate(xi):
        if ii not in xidx2segments:
            continue
        segments = xidx2segments[ii]
        for jj, ypos in enumerate(yi):
            # based on PNPOLY (W. Randolph Franklin)
            # http://www.ecse.rpi.edu/~wrf/Research/Short_Notes/pnpoly.html
            # retrieved Apr 2013
            c = False
            for i, j in segments:
                if ypos < (y[j] - y[i]) * (xpos - x[i]) / (x[j] - x[i]) + y[i]:
                    c = not c
            msk[jj, ii] = np.bool(c)

    return x, y, msk, xi, yi
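The inner loop above is a variant of the PNPOLY crossing test restricted to segments that straddle the current x column; the general form of that ray-crossing test, as a hedged standalone sketch, is:

import numpy as np

def point_in_polygon(xpos, ypos, poly_x, poly_y):
    # Ray-crossing (PNPOLY-style) test: count how many polygon edges a
    # horizontal ray from (xpos, ypos) crosses; an odd count means "inside".
    inside = False
    j = len(poly_x) - 1
    for i in range(len(poly_x)):
        if (poly_y[i] > ypos) != (poly_y[j] > ypos):
            x_cross = (poly_x[j] - poly_x[i]) * (ypos - poly_y[i]) / (poly_y[j] - poly_y[i]) + poly_x[i]
            if xpos < x_cross:
                inside = not inside
        j = i
    return np.bool_(inside)

square_x = [0.0, 1.0, 1.0, 0.0]
square_y = [0.0, 0.0, 1.0, 1.0]
print(point_in_polygon(0.5, 0.5, square_x, square_y))  # True
print(point_in_polygon(1.5, 0.5, square_x, square_y))  # False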
예제 #44
0
    def read_from_pybar(self, h5name, config, fdac, tdac, masks):

        self._h5name = h5name

        # parse the config file
        print 'read', config
        with open(config, 'r') as config_file:
            for l in config_file.readlines():
                stripped_line = l.strip().split(' ')
                if stripped_line[0] in ('', '#'):
                    continue
                self._pybar_keys.append(stripped_line[0])
                self._pybar_dict[stripped_line[0]] = stripped_line[1]

        # parse the fdac file
        fdac_list = []
        with open(fdac, 'r') as fdac_file:
            for l in fdac_file.readlines():
                stripped_line = l.strip().replace('  ', ' ').split(' ')
                if '#' in stripped_line[0]:
                    continue
                filtered_line = filter(lambda a: a != '', stripped_line[1:])
                int_filt_line = [int(i) for i in filtered_line]
                fdac_list.append(int_filt_line)

        fdac_list_new = [
            fdac_list[2 * i] + fdac_list[2 * i + 1]
            for i in xrange(len(fdac_list) / 2)
        ]

        # parse the tdac file
        tdac_list = []
        with open(tdac, 'r') as tdac_file:
            for l in tdac_file.readlines():
                stripped_line = l.strip().replace('  ', ' ').split(' ')
                if '#' in stripped_line[0]:
                    continue
                filtered_line = filter(lambda a: a != '', stripped_line[1:])
                int_filt_line = [int(i) for i in filtered_line]
                tdac_list.append(int_filt_line)
        tdac_list_new = [
            tdac_list[2 * i] + tdac_list[2 * i + 1]
            for i in xrange(len(tdac_list) / 2)
        ]
        print len(tdac_list[0]), len(tdac_list), tdac_list[0]
        print len(tdac_list[1]), len(tdac_list), tdac_list[1]
        print len(tdac_list_new[0]), len(tdac_list_new), tdac_list_new[0]
        # parse the c_high, c_low and enable file
        lcap_list = []
        scap_list = []
        enable_list = []
        hitbus_list = []
        lists = [lcap_list, scap_list, enable_list]
        for l, mask in zip(lists, masks[:3]):
            print 'read', mask
            with open(mask, 'r') as file:
                for line in file.readlines():
                    if '#' in line:
                        continue
                    stripped_line = line.strip().replace('  ', '-').split('-')
                    joined_line = ''.join(stripped_line[1:])
                    int_line = [int(i) for i in joined_line]
                    l.append(int_line)

        # parse the imon file
        print 'read', masks[3]
        with open(masks[3], 'r') as file:
            for line in file.readlines():
                if '#' in line:
                    continue
                stripped_line = line.strip().replace('  ', '-').split('-')
                joined_line = ''.join(stripped_line[1:])
                int_line = [int(np.invert(np.bool(int(i)))) for i in joined_line]
                hitbus_list.append(int_line)

        # build the pixelconfig list (list of dictionary)
        self._yarr_pixelconfig = []

        for irow, (f, t, lcap, scap, e, hitbus) in enumerate(
                zip(fdac_list_new, tdac_list_new, lcap_list, scap_list,
                    enable_list, hitbus_list)):
            self._yarr_pixelconfig.append({
                'Enable': e,  #','.join((str(item_e) for item_e in e)),
                'FDAC': f,
                'Hitbus': hitbus,
                'LCap': lcap,
                'Row': irow,
                'SCap': scap,
                'TDAC': t
            })

        pass
예제 #45
0
def vidSetupData(curation_path, root, crops_train):
    rootPath = root + "Data/VID/train/"
    MAX_TRACKIDS = 50
    framesIdPath = curation_path + "vid_id_frames.txt"

    videoPaths = []
    videoIds = []
    videoNFrames = []

    with open(framesIdPath, 'r') as vidFiles:
        while True:
            line = vidFiles.readline()
            if not line:
                break

            videoPath, videoId, videoNFrame = [str for str in line.split(' ')]
            videoPaths.append(videoPath)
            videoIds.append(np.uint32(videoId))
            videoNFrames.append(np.uint32(videoNFrame))

        vidFiles.close()
        videoIds = np.array(videoIds)
        videoNFrames = np.array(videoNFrames)

    nVideos = videoIds.shape[0]
    # nVideos = 4367
    imdb = Imdb(nVideos, MAX_TRACKIDS, videoIds, videoNFrames, videoPaths)

    for i in range(0, nVideos):  #
        print("Objects from video %d" % i + "/%d" % nVideos)

        with open(rootPath + imdb.path[i] + ".txt", 'r') as vidFile:
            trackIds = []
            oClasses = []
            framesSize = []
            extents = []
            valids = []
            framePathes = []
            validPerTrackids = []
            targetIdx = 0  #targetIdx corresponds to l in the Matlab version; however, targetIdx starts from 0 rather than 1
            validPerTrackidPath = ""

            while True:
                line = vidFile.readline()
                if (not line) or (len(line) < 1):
                    break

                trackId, oClass, frameW, frameH, oXMins, oYMinx, oWs, ohS, framePath = [
                    str for str in line.split(',')
                ]

                trackId = np.uint8(trackId)
                trackIds.append(trackId)
                oClasses.append(np.uint8(oClass))
                frameW = np.uint16(frameW)
                frameH = np.uint16(frameH)
                framesSize.append([frameW, frameH])
                oXMins = np.int16(oXMins)
                oYMinx = np.int16(oYMinx)
                oWs = np.int16(oWs)
                ohS = np.int16(ohS)
                extents.append([oXMins, oYMinx, oWs, ohS])
                valids.append(np.bool(1))
                _, framePath = [str for str in framePath.split("train/")]
                framePath, _ = [str for str in framePath.split("\n")]
                framePathes.append(framePath)

                if True:  #if valids[length(valids)-1] == True
                    imdb.n_valid_objects[i] += 1
                    imdb.valid_trackids[trackId, i] += 1
                    while trackId + 1 > len(validPerTrackids):
                        tmp = []
                        validPerTrackids.append(tmp)

                    validPerTrackids[trackId].append(np.uint16(targetIdx))

                targetIdx += 1

            imdbObjects = ImdbObjects(trackIds, oClasses, framesSize, extents,
                                      valids, framePathes)
            imdb.objects.append(imdbObjects)
            imdb.valid_per_trackid.append(validPerTrackids)
            imdb.total_valid_objects += imdb.n_valid_objects[i]
            print(imdb.valid_trackids[:, i])

            vidFile.close()
            print("Found %d" % imdb.n_valid_objects[i] +
                  " valid objects in %d" % imdb.nframes[i] + " frames")

    toDelete = np.where(imdb.n_valid_objects < 2)[0]
    imdb = deleteFromImdb(imdb, toDelete)
    toDelete = np.unique(np.where(imdb.valid_trackids == 1)[1])
    imdb = deleteFromImdb(imdb, toDelete)
    saveImdbToPkl(imdb, curation_path, crops_train)
    return imdb
예제 #46
0
 def testBool(self):
     b = np.bool(True)
     self.assertEqual(ujson.decode(ujson.encode(b)), b)
예제 #47
0
def readData(dataFile):
    utterances = list()
    tags = list()
    starts = list()
    startid = list()

    word_vocab_index = 2
    tag_vocab_index = 2
    word2id = {'<pad>': 0, '<unk>': 1}
    tag2id = {'<pad>': 0, '<unk>': 1}
    id2word = ['<pad>', '<unk>']
    id2tag = ['<pad>', '<unk>']

    utt_count = 0
    temp_startid = 0
    for line in open(dataFile, 'r'):
        d = line.split('\t')
        utt = d[0].strip()
        t = d[1].strip()
        if len(d) > 2:
            start = np.bool(int(d[2].strip()))
            starts.append(start)
            if start:
                temp_startid = utt_count
            startid.append(temp_startid)

        temp_utt = list()
        temp_tags = list()
        mywords = utt.split()
        mytags = t.split()
        if len(mywords) != len(mytags):
            print(mywords)
            print(mytags)

        for i in range(len(mywords)):
            if mywords[i] not in word2id:
                word2id[mywords[i]] = word_vocab_index
                id2word.append(mywords[i])
                word_vocab_index += 1
            if mytags[i] not in tag2id:
                tag2id[mytags[i]] = tag_vocab_index
                id2tag.append(mytags[i])
                tag_vocab_index += 1

            temp_utt.append(word2id[mywords[i]])
            temp_tags.append(tag2id[mytags[i]])
        utt_count += 1
        utterances.append(temp_utt)
        tags.append(temp_tags)

    data = {
        'start': starts,
        'startid': startid,
        'utterances': utterances,
        'tags': tags,
        'uttCount': utt_count,
        'id2word': id2word,
        'id2tag': id2tag,
        'wordVocabSize': word_vocab_index,
        'tagVocabSize': tag_vocab_index,
        'word2id': word2id,
        'tag2id': tag2id
    }
    return data
예제 #48
0
 def extract(self, gdb_value: gdb.Value, index: Tuple[int, ...]):
     return np.bool(gdb_value)
예제 #49
0
 def test_infer_type_with_np_bool(self):
     self.assertEqual(str(type_utils.infer_type(np.bool(True))), 'bool')
예제 #50
0
result_file = './iterative_result.csv'

# load in the data from CSV files
properties_file = '../input/properties_2016.csv'
training_file = '../input/train_2016_v2.csv'
logger.debug('loading properties data from %s' % properties_file)
properties = pd.read_csv(properties_file,
                         dtype={
                             'fireplaceflag': np.bool,
                             'hashottuborspa': np.bool,
                             'propertycountylandusecode': np.str,
                             'propertyzoningdesc': np.str
                         },
                         converters={
                             'taxdelinquencyflag':
                             lambda x: np.bool(True)
                             if x == 'Y' else np.bool(False)
                         })  # avoid mixed type warning
logger.debug('loading training data from %s' % training_file)
train = pd.read_csv(training_file)
logger.debug('data load complete.')

# encode labels as integers as needed
for c in properties.columns:
    properties[c] = properties[c].fillna(1)
    if properties[c].dtype == 'object':
        label_encoder = LabelEncoder()
        label_encoder.fit(list(properties[c].values))
        properties[c] = label_encoder.transform(list(properties[c].values))

do_data_cleanup = True
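A minimal sketch of the converters pattern used in the read_csv call above, with an in-memory CSV standing in for properties_2016.csv (np.bool_ is used here in place of the deprecated np.bool alias):

import io
import numpy as np
import pandas as pd

csv_text = "parcelid,taxdelinquencyflag\n1,Y\n2,\n3,Y\n"
df = pd.read_csv(
    io.StringIO(csv_text),
    # The converter receives the raw string for each field and maps it to a bool.
    converters={"taxdelinquencyflag": lambda x: np.bool_(True) if x == "Y" else np.bool_(False)},
)
print(df["taxdelinquencyflag"].tolist())  # [True, False, True]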
예제 #51
0
__all__ = (
    'ActionDecoder',
    'Config',
    'get_obs_scale',
    'INITIAL_YAW_ZERO',
    'Key',
    'Obs',
    'PhysEnv',
    'VectorPhysEnv',
)

# These are chosen to match the initial state of the player on the 100m map.
_INITIAL_STATE = {
    'z_pos': np.float32(32.843201),
    'vel': np.array([0, 0, -12], dtype=np.float32),
    'on_ground': np.bool(False),
    'jump_released': np.bool(True)
}
INITIAL_YAW_ZERO = np.float32(90)


class Key(enum.IntEnum):
    """Enumeration of input keys.

    These correspond with action vector indices, eg. action[Key.FORWARD] indicates the agent's desire to move forward.

    Note that the mouse x action (by default continuous) is also included in the action vector, but it isn't included
    here.

    """
    STRAFE_LEFT = 0
예제 #52
0
def experiment(report_every_n=100):
    """Run training operations, then validate.
  
  Args:
    report_every_n: Print loss every n training operations. 0 for no printing.
    
  Returns:
    Validation top-1 accuracy and a numpy array of training losses
  """

    #Placeholders to feed hyperparameters into graph
    learning_rate_ph = tf.placeholder(tf.float32, name="learning_rate")
    beta1_ph = tf.placeholder(tf.float32, shape=(), name="beta1")
    is_training_ph = tf.placeholder(tf.bool, name="is_training")
    mode_ph = tf.placeholder(tf.int32, name="mode")

    def load_data_subset(subset):
        return load_data(
            dir="//Desktop-sa1evjv/f/wavefunctions/wavefunctions/",
            subset=subset,
            batch_size=32)

    inputs, target_outputs = tf.case({
        tf.equal(mode_ph, 0):
        lambda: load_data_subset("train"),
        tf.equal(mode_ph, 1):
        lambda: load_data_subset("val"),
        tf.equal(mode_ph, 2):
        lambda: load_data_subset("test")
    })

    #Describe learning policy
    start_iter = 0
    train_iters = 100_000
    val_iters = 1_000

    learning_rate = 0.001
    beta1 = 0.9

    #Configure operations
    train_op, loss, output = configure(inputs=inputs,
                                       target_outputs=target_outputs,
                                       is_training=is_training_ph,
                                       learning_rate=learning_rate_ph,
                                       beta1=beta1_ph,
                                       is_depthwise_sep=False)

    #Tensors to dump as visual output
    first_image = inputs[0]
    first_target_output = target_outputs[0]
    first_output = output[0]

    #Session configuration
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True  #Only use required GPU memory
    config.gpu_options.force_gpu_compatible = True

    model_dir = "//flexo.ads.warwick.ac.uk/Shared41/Microscopy/Jeffrey-Ede/models/wavefunctions/1/"

    #"//flexo.ads.warwick.ac.uk/Shared41/Microscopy/Jeffrey-Ede/models/magnifier/"

    saver = tf.train.Saver(max_to_keep=1)

    log_filepath = model_dir + "log.txt"
    save_period = 1
    save_period *= 3600
    with tf.Session(config=config) as sess, open(log_filepath,
                                                 "a") as log_file:

        #Initialize network parameters
        feed_dict = {
            is_training_ph: np.bool(True),
            learning_rate_ph: np.float32(learning_rate),
            beta1_ph: np.float32(beta1),
            mode_ph: np.int32(0)
        }

        if start_iter:
            saver.restore(sess,
                          tf.train.latest_checkpoint(model_dir + "model/"))
        else:
            sess.run(tf.global_variables_initializer(), feed_dict=feed_dict)

        #Training
        training_losses = np.zeros((train_iters - start_iter))
        time0 = time.time()
        for iter in range(start_iter, train_iters):

            is_halfway = iter >= train_iters // 2

            lr = learning_rate * 0.5**(iter // (train_iters // 6))
            is_training = not is_halfway
            beta1 = 0.5 if is_halfway else 0.9

            #Feed values into training operations
            feed_dict = {
                is_training_ph: np.bool(is_training),
                learning_rate_ph: np.float32(lr),
                beta1_ph: np.float32(beta1),
                mode_ph: np.int32(0)
            }

            if iter in [0, 100, 500] or not iter % 25_000 or (
                    0 <= iter < 10_000
                    and not iter % 1000) or iter == start_iter:
                _, step_loss, [
                    step_image, step_target_output, step_output
                ] = sess.run([
                    train_op, loss,
                    [first_image, first_target_output, first_output]
                ],
                             feed_dict=feed_dict)

                save_input_loc = model_dir + "input-" + str(iter) + ".tif"
                save_truth_loc = model_dir + "truth-" + str(iter) + ".tif"
                save_output_loc = model_dir + "output-" + str(iter) + ".tif"
                Image.fromarray(
                    step_image.reshape(cropsize, cropsize).astype(
                        np.float32)).save(save_input_loc)
                Image.fromarray(
                    step_target_output.reshape(cropsize, cropsize).astype(
                        np.float32)).save(save_truth_loc)
                Image.fromarray(
                    step_output.reshape(cropsize, cropsize).astype(
                        np.float32)).save(save_output_loc)
            else:
                _, step_loss = sess.run([train_op, loss], feed_dict=feed_dict)

            training_losses[iter - start_iter] = step_loss

            output = f"Iter: {iter}, Loss: {step_loss}"
            if report_every_n:
                if not iter % report_every_n:
                    print(output)

            log_file.write(output)

            if time.time() >= time0 + save_period:
                saver.save(sess,
                           save_path=model_dir + "model/model",
                           global_step=iter)
                time0 = time.time()

        #Validation - super important!
        dataset.initialize_iterator(sess=sess, mode="val")

        val_loss = 0.
        for iter in range(val_iters):

            feed_dict = {is_training_ph: np.bool(False), mode_ph: np.int32(1)}

            step_loss = sess.run(loss, feed_dict=feed_dict)
            val_loss += step_loss

        val_loss /= val_iters
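The learning-rate schedule above is a staircase decay, halving the rate every train_iters // 6 iterations; a small sketch of the arithmetic:

import numpy as np

learning_rate = 0.001
train_iters = 100_000

# The rate is halved once per completed block of train_iters // 6 iterations.
for iteration in (0, 20_000, 40_000, 99_999):
    lr = learning_rate * 0.5 ** (iteration // (train_iters // 6))
    print(iteration, np.float32(lr))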
예제 #53
0
    def barycenter_broaden(self,
                           rv: float = 30.0,
                           consecutive_test: bool = False):
        """Sweep telluric mask symmetrically by +/- a velocity.

        Updates the object's mask.

        Parameters
        ----------
        rv: float
            Velocity to extend masks in km/s. Default is 30 km/s.
        consecutive_test: bool
            Checks for 3 consecutive zeros to mask out transmission. Default is False.

        """
        if self.shifted:
            warnings.warn("Detected that 'shifted' is already True. "
                          "Check that you want to rv extend the masks again.")
        rv_mps = rv * 1e3  # Convert from km/s into m/s

        shift_amplitudes = self.wl * rv_mps / const.c.value
        # Operate element wise
        blue_shifts = self.wl - shift_amplitudes
        red_shifts = self.wl + shift_amplitudes

        bary_mask = []
        for (blue_wl, red_wl, mask) in zip(blue_shifts, red_shifts, self.mask):
            if mask == 0:
                this_mask_value = False
            else:
                # np.searchsorted is faster than boolean masking over the wavelength range
                # It returns index locations to put the min/max doppler-shifted values
                slice_limits = np.searchsorted(self.wl, [blue_wl, red_wl])
                slice_limits = [
                    index if (index < len(self.wl)) else len(self.wl) - 1
                    for index in slice_limits
                ]  # Fix searchsorted end index

                mask_slice = self.mask[slice_limits[0]:slice_limits[1]]

                if consecutive_test:
                    # Mask value False if 3 or more consecutive zeros in slice.
                    len_consec_zeros = consecutive_truths(~mask_slice)
                    if np.all(
                            ~mask_slice
                    ):  # All pixels of the slice are zero (shouldn't get here)
                        this_mask_value = False
                    elif np.max(len_consec_zeros) >= 3:
                        this_mask_value = False
                    else:
                        this_mask_value = True
                        if np.sum(~mask_slice) > 3:
                            if self.verbose:
                                print((
                                    "There were {0}/{1} zeros in this "
                                    "barycentric shift but None were 3 consecutive!"
                                ).format(np.sum(~mask_slice), len(mask_slice)))

                else:
                    this_mask_value = np.bool(
                        np.product(mask_slice))  # Any 0s will make it 0

                # Checks
                if not this_mask_value:
                    assert np.any(~mask_slice)
                else:
                    if not consecutive_test:
                        assert np.all(mask_slice)
            bary_mask.append(this_mask_value)
        self.mask = np.asarray(bary_mask, dtype=np.bool)
        self.shifted = True
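The barycentric sweep above widens each masked wavelength by the classical Doppler width delta_wl = wl * rv / c; a minimal sketch of that step (assuming astropy is available for the speed of light):

import numpy as np
from astropy import constants as const

wl = np.array([2110.0, 2111.0, 2112.0])  # illustrative wavelengths, nm
rv_mps = 30.0 * 1e3                      # 30 km/s in m/s

shift_amplitudes = wl * rv_mps / const.c.value
print(wl - shift_amplitudes)  # blue-shifted window edges
print(wl + shift_amplitudes)  # red-shifted window edges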
예제 #54
0
def experiment(report_every_n=100):
    """Run training operations, then validate.
  
  Args:
    report_every_n: Print loss every n training operations. 0 for no printing.
    
  Returns:
    Validation top-1 accuracy and a numpy array of training losses
  """

    #Placeholders to feed hyperparameters into graph
    learning_rate_ph = tf.placeholder(tf.float32, name="learning_rate")
    beta1_ph = tf.placeholder(tf.float32, shape=(), name="beta1")
    decay_ph = tf.placeholder(tf.float32, shape=(), name="decay")
    gen_scale_ph = tf.placeholder(tf.float32, shape=(), name="gen_scale")
    is_training_ph = tf.placeholder(tf.bool, name="is_training")
    mode_ph = tf.placeholder(tf.int32, name="mode")

    #data_dir = "//Desktop-sa1evjv/h/wavefunctions/"
    data_dir = "//Desktop-sa1evjv/f/wavefunctions_single/wavefunctions/"
    batch_size = 24

    def load_data_subset(subset):
        return load_data(dir=data_dir, subset=subset, batch_size=batch_size)

    inputs, target_outputs = tf.case({
        tf.equal(mode_ph, 0):
        lambda: load_data_subset("train"),
        tf.equal(mode_ph, 1):
        lambda: load_data_subset("val"),
        tf.equal(mode_ph, 2):
        lambda: load_data_subset("test")
    })

    #Describe learning policy
    start_iter = 1
    train_iters = 500_000
    val_iters = 1_000

    learning_rate = 0.0002
    beta1 = 0.9

    #Configure operations
    train_op, loss, output, error_dist = configure(
        inputs=inputs,
        batch_size=batch_size,
        target_outputs=target_outputs,
        is_training=is_training_ph,
        learning_rate=learning_rate_ph,
        beta1=beta1_ph,
        is_depthwise_sep=False,
        decay=decay_ph,
        gen_scale=gen_scale_ph)

    clip_op = tf.get_collection("clip_weights")

    #Tensors to dump as visual output
    first_image = inputs[0]
    first_target_output = target_outputs[0]
    first_output = output[0]

    #Session configuration
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True  #Only use required GPU memory
    config.gpu_options.force_gpu_compatible = True

    model_dir = f"//flexo.ads.warwick.ac.uk/Shared41/Microscopy/Jeffrey-Ede/models/wavefunctions/{EXPER_NUM}/"

    saver = tf.train.Saver(max_to_keep=1)
    noteable_saver = tf.train.Saver(max_to_keep=1)

    log_filepath = model_dir + "log.txt"
    save_period = 1
    save_period *= 3600
    with tf.Session(config=config) as sess, open(log_filepath,
                                                 "a") as log_file:

        #Initialize network parameters
        feed_dict = {
            is_training_ph: np.bool(True),
            learning_rate_ph: np.float32(learning_rate),
            beta1_ph: np.float32(beta1),
            mode_ph: np.int32(0)
        }

        sess.run(tf.global_variables_initializer(), feed_dict=feed_dict)
        if start_iter:
            #saver.restore(
            #    sess,
            #    tf.train.latest_checkpoint(model_dir+"model/")
            #    )
            noteable_saver.restore(
                sess, tf.train.latest_checkpoint(model_dir + "noteable_ckpt/"))

        ##Finalize graph to prevent additional nodes from being added
        #sess.graph.finalize()

        #Training
        time0 = time.time()
        errors = []
        for iter in range(start_iter, train_iters):
            print(f"Iter: {iter}")

            is_halfway = iter >= train_iters // 2

            lr = learning_rate * 0.5**(max(iter // (train_iters // 7), 7))
            is_training = False  #iter < 50_000# or not is_halfway
            beta1 = 0.9  #0.5 if is_halfway else 0.9

            #Feed values into training operations
            feed_dict = {
                is_training_ph: np.bool(is_training),
                learning_rate_ph: np.float32(lr),
                beta1_ph: np.float32(beta1),
                mode_ph: np.int32(0)
            }

            save_loc = model_dir + "train_loc.npy"
            if iter < 20000 // batch_size:
                err = sess.run(error_dist, feed_dict=feed_dict)
                errors.append(err)
                print(np.mean(err))
                if not iter % (1000 // batch_size):
                    errors0 = np.concatenate(tuple(errors), axis=0)
                    np.save(save_loc, errors0)

                continue
            else:
                errors0 = np.concatenate(tuple(errors), axis=0)
                np.save(save_loc, errors0)
                quit()
            #_, step_loss = sess.run([train_op, loss], feed_dict=feed_dict)

            output = f"Iter: {iter}"
            for k in step_loss:
                output += f", {k}: {step_loss[k]}"
            print(output)

            #output = f"Iter: {iter}, Loss: {step_loss}"
            #if report_every_n:
            #  if not iter % report_every_n:
            #    print(output)

            if "nan" in output:
                saver.restore(sess,
                              tf.train.latest_checkpoint(model_dir + "model/"))
                #quit()

            log_file.write(output)

            if iter in [train_iters // 2 - 1, train_iters - 1]:
                noteable_saver.save(sess,
                                    save_path=model_dir +
                                    "noteable_ckpt/model",
                                    global_step=iter)
                time0 = time.time()
                start_iter = iter
            elif time.time() >= time0 + save_period:
                saver.save(sess,
                           save_path=model_dir + "model/model",
                           global_step=iter)
                time0 = time.time()

        #Validation - super important!
        val_loss = 0.
        for iter in range(val_iters):

            feed_dict = {is_training_ph: np.bool(False), mode_ph: np.int32(1)}

            step_loss = sess.run(loss, feed_dict=feed_dict)
            val_loss += step_loss

        val_loss /= val_iters

    return val_loss
예제 #55
0
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = params["bracket_gpu_id"]

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = timm.create_model(params["bracket_model_name"], pretrained=False)
    model.to(device)
    model.eval()
    checkpoint = torch.load(params["bracket_weight_path"])
    model.load_state_dict(checkpoint)
    transform = T.Compose([
        T.ToTensor(),
        T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    sock = su.initialize_client(params["tcp_ip"], params["bracket_tcp_port"])

    while True:
        img = su.recvall_image(sock) 
        img = cv2.resize(img, (224, 224), interpolation=cv2.INTER_LINEAR)
        img = transform(img).unsqueeze(0)
        output = model(img.to(device))

        topk=(1,)
        maxk = max(topk)
        _, pred = output.topk(maxk, 1, True, True)
        pred = np.bool(pred.t()[0].cpu().detach().numpy())
        pred = not pred
        su.sendall_pickle(sock, pred)

    sock.close()
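The prediction handling above reduces a (1, num_classes) logit tensor to a single boolean before sending it back over the socket; a hedged standalone sketch of that conversion (using the builtin bool rather than the deprecated np.bool):

import torch

output = torch.tensor([[0.2, 1.7]])      # illustrative logits for 2 classes
_, pred = output.topk(1, 1, True, True)  # top-1 class index, shape (1, 1)
pred = bool(pred.t()[0].item())          # class index 1 -> True
print(not pred)                          # the server then sends the negation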
예제 #56
0
 def test_isbool(self):
   self.assertTrue(numeric.isbool(True))
   self.assertTrue(numeric.isbool(numpy.bool(True)))
   self.assertTrue(numeric.isbool(numpy.array(True)))
   self.assertFalse(numeric.isbool(numpy.array([True])))
예제 #57
0
filepath = '/global/cscratch1/sd/sukhdeep/mb2_subsample/'
ptype = 0
snapArray = [85]

#particleFileIO.particleFile(filepath,col_def,fraction,num_files,ptype)
#particles=particleFileIO.readFITSFile('/global/cscratch1/sd/sukhdeep/mb2_subsample/',ptype,snapArray)
#NNPairs=np.zeros((sightBins,nbins))
#NRPairs=np.zeros((sightBins,nbins))
#RRPairs=np.zeros((sightBins,nbins))
#RNPairs=np.zeros((sightBins,nbins))
#print(filename,col_names,cuts,sightBins,rscale,nbins,min_sep,max_sep,rpar_step,min_box,max_box,logfile,savefile,key)
#corFunc.corFunc(filename,col_names,cuts,sightBins,rscale,nbins,min_sep,max_sep,rpar_min,rpar_max,min_box,max_box,'NG',logfile,savefile,key=key,fname2='yes',dat2=particles)

ptype = [0, 1, 4]
combine = np.bool(sys.argv[5])
dir = sys.argv[6]

print(cuts, rscale, nbins, min_sep, combine, dir)
if combine == True:
    print('hello')
    for j in snapArray:
        particles = table.Table()
        particles['x'] = []
        particles['y'] = []
        particles['z'] = []
        particles['mass'] = []
        key = 'snap_' + str(j)
        savefile = '/global/homes/h/hmarti21/Results/' + dir + '/TreeCorrData_' + str(
            j) + 'combine.txt'
        logfile = '/global/homes/h/hmarti21/Logs/' + dir + '/log_' + str(
예제 #58
0
 def test_capture_result_with_np_bool(self):
   with tf.Graph().as_default() as graph:
     type_spec, binding = tensorflow_utils.capture_result_from_graph(
         np.bool(True), graph)
   self._assert_captured_result_eq_dtype(type_spec, binding, 'bool')
예제 #59
0
 def step(self, action, reward, current_state, terminate):
     if self.start:
         self.start = False
     else:
         self.controller.step(copy.deepcopy(self.last_state), action, np.array(reward.last_reward, dtype=np.float32),
                              current_state, np.bool(terminate))
예제 #60
0
 def test_bool_scalar_to_tensor_and_back(self):
     x = np.bool()
     tensor_proto = np_to_tensor_proto(x)
     x_restored = tensor_proto_to_np(tensor_proto)
     assert x == x_restored
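np_to_tensor_proto and tensor_proto_to_np above are project-specific helpers; the same boolean roundtrip can be sketched with TensorFlow's public proto utilities (note that np.bool() with no argument is False):

import numpy as np
import tensorflow as tf

x = np.bool_(False)                  # same default value, without the deprecated alias
proto = tf.make_tensor_proto(x)      # NumPy scalar -> TensorProto
x_restored = tf.make_ndarray(proto)  # TensorProto -> NumPy
assert x == x_restored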