Example #1
	def contour_plot(self, box, nc=10, z_max=-2e20, z_min=2e20,
					 color='k', labels=True, precision=5, ptype="sig_figs",
					 contours=[], neg_ls="dashed", pos_ls="solid", lw=1.0):
		"""plot the field as a contour"""
		# adapted to work with a cpdn_box
		# have to copy LON and LAT so they can be manipulated
		LON = copy(box.get_dimension("X").get_values())
		LAT = copy(box.get_dimension("Y").get_values())
		Z = numpy.array(box.get_values().squeeze())
		# translate the LON, LAT and Z to the coordinate system
		LON, LAT, Z = self.prepare_data(LON, LAT, Z)
		# create the contour values to draw
		if contours != []:
			V = contours
		elif z_max != -2e20 and z_min != 2e20:
			V = numpy.linspace(z_min, z_max, nc)
		else:
			V = nc
		# get the line styles for the contours
		if isinstance(V, int):
			# V is just a contour count here, so let matplotlib pick the
			# levels and use a single line style
			ls = pos_ls
		else:
			ls = [neg_ls if v < 0.0 else pos_ls for v in V]
		# draw the contours - all in the same color
		CS = self.sp.contour(LON, LAT, Z, V, colors=color, linewidths=lw, linestyles=ls)
		# do the labels
		format = self.get_format_string(precision, ptype)
		self.sp.clabel(CS, fontsize=8, inline=True, inline_spacing=-2, 
					   fmt=format)
Example #2
def send_mail_mime(request, to, frm, subject, msg, cc=None, extra=None, toUser=False, bcc=None):
    """Send MIME message with content already filled in."""
    
    condition_message(to, frm, subject, msg, cc, extra)

    # start debug server with python -m smtpd -n -c DebuggingServer localhost:2025
    # then put USING_DEBUG_EMAIL_SERVER=True and EMAIL_HOST='localhost'
    # and EMAIL_PORT=2025 in settings_local.py
    debugging = getattr(settings, "USING_DEBUG_EMAIL_SERVER", False) and settings.EMAIL_HOST == 'localhost' and settings.EMAIL_PORT == 2025

    if test_mode or debugging or settings.SERVER_MODE == 'production':
        with smtp_error_logging(send_smtp) as logging_send:
            with smtp_error_user_warning(logging_send,request) as send:
                send(msg, bcc)
    elif settings.SERVER_MODE == 'test':
        if toUser:
            copy_email(msg, to, toUser=True, originalBcc=bcc)
        elif request and 'testmailcc' in request.COOKIES:
            copy_email(msg, request.COOKIES['testmailcc'], originalBcc=bcc)
    try:
        copy_to = settings.EMAIL_COPY_TO
    except AttributeError:
        copy_to = "*****@*****.**" % settings.SERVER_MODE
    if copy_to and not test_mode and not debugging: # if we're running automated tests, this copy is just annoying
        if bcc:
            msg['X-Tracker-Bcc']=bcc
        with smtp_error_logging(copy_email) as logging_copy:
            with smtp_error_user_warning(logging_copy,request) as copy:
                copy(msg, copy_to,originalBcc=bcc)
Example #3
def backup(src, prefix='.'):
    """Backup (copy) `src` to <src><prefix><num>, where <num> is an integer
    starting at 0 which is incremented until there is no destination with that
    name.
    
    Symlinks are handled by shutil.copy() for files and shutil.copytree() for
    dirs. In both cases, the content of the file/dir pointed to by the link is
    copied.

    Parameters
    ----------
    src : str
        name of file/dir to be copied
    prefix : str, optional
    """
    if os.path.isfile(src):
        copy = shutil.copy 
    elif os.path.isdir(src):
        copy = shutil.copytree
    else:
        raise StandardError("source '%s' is not file or dir" %src)
    idx = 0
    dst = src + '%s%s' %(prefix,idx)
    while os.path.exists(dst):
        idx += 1
        dst = src + '%s%s' %(prefix,idx)
    # sanity check
    if os.path.exists(dst):
        raise StandardError("destination '%s' exists" %dst)
    else:        
        copy(src, dst)
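A quick usage sketch for backup (a minimal example, assuming os and shutil are imported in the module; the file name is hypothetical). Each call picks the first free numeric suffix:

import os, shutil

open('example.txt', 'w').close()
backup('example.txt')   # copied to example.txt.0
backup('example.txt')   # copied to example.txt.1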
Example #4
def extendRow(list,fig) :

    if len(list) > 5 :
        return

    l = copy(list)
    f = copy(fig)

    f[numbers2[l[-1]]] = 1


    s34 = str(l[-1])[-2:]
    matches = numbers2contains(s34)

    for match in matches :
        index = -1
        try :
            index = l.index(match)
        except:
            pass

        if numbers2[match] not in f and index == -1:
            extendRow(l + [match], f)



            if len(l) == 5 and str(l[0])[:2] == str(match)[-2:] :
                print str(l + [match]) + str(f) + "!!!!!!!!"
    return
Example #5
    def build(self):
        try:
            # Find home dir
            if platform == "android":
                _home = "/storage/emulated/0/Android/data/"
            else:
                _home = os.path.expanduser("~")
            # Check if there is a settings file there
            _config_path = os.path.join(_home, "se.optimalbpm.optimal_file_sync/config.txt")
            # Raise error if non existing config
            if not os.path.exists(_config_path):
                if not os.path.exists(os.path.join(_home, "se.optimalbpm.optimal_file_sync")):
                    os.mkdir(os.path.join(_home, "se.optimalbpm.optimal_file_sync"))
                if platform == "android":
                    copy("default_config_android.txt", _config_path)
                else:
                    copy("default_config_linux.txt", _config_path)

                notification.notify("Optimal File Sync Service", "First time, using default config.")

            self.settingsframe = SettingsFrame()
            self.settingsframe.init(_cfg_file=_config_path)
            return self.settingsframe
        except Exception as e:
            notification.notify("Optimal File Sync Service", "Error finding config :" + str(e))
Example #6
 def Evaluate( self ,queueD, queueCodeN , queueN, ifile ,Population ):
     i = 0
     self.X = []
     self.ListNonDominated = []
     self.lockList.acquire()
     self.ListCodedNonDominated = []  # copy(queueCodeN)
     self.ListDominated = copy(queueD)
     self.lockList.release()
     while i < 20:   #????
             self.RealisableSpace( Population )
             j = len(self.X)-1
             self.Dominate(queueD,j)
             i = i +1
     self.RealisableSpace( Population )
     j = len(self.X)-1
     self.Dominate(queueD,j)
     self.lock.acquire()
     self.Tofile('data',self.X,ifile , 1)
     self.lock.release()
     fonction1 = eval(self.fonctionObject[0])
     fonction2 = eval(self.fonctionObject[1])
     i = 0
     while i < len(self.X):
         if self.X[i] not in self.ListDominated:
             self.ListNonDominated.append(copy(self.X[i]))
             self.ListCodedNonDominated.append(copy(self.CodedX[i]))
             self.lockList.acquire()
             queueCodeN.append(copy(self.CodedX[i]))
             queueN.append(copy(self.X[i]))                
             self.lockList.release()
         i = i +1
Example #7
def AnIPsolver(constraints, obj, Qorbit): #objective function must contain integer terms (adjust it so that is the case)
    #we make a copy of the constraints so as to not lose them
    A1 = copy(constraints)
    A2 = copy(constraints)
    #we generate temporary maxes and mins
    cmax = probGenerate(A1, obj, 'Max') #get the current max
    cmin = probGenerate(A2, obj, 'Min') #get the current min
    currentmax = cmax[2]
    currentmin = cmin[2]

    #proper bounding would create a tighter space 
    currentsolution = 0
    temptarg = currentmin  # so the early-exit return below is always defined
    while True:
        if (currentmax - currentmin) < 1 or currentmin > currentmax: #if these two planes get within distance 1 of each other, then there really won't be an integer point
            return [currentsolution,temptarg]
        temptarg = (currentmax + currentmin)/2
        #we make a shallow copy of the constraints
        tconstraint = copy(constraints)
        #we then slice the set by adding in the expected performance we desire
        tconstraint += [obj + [-1]+[temptarg]] #add on the bound obj >= thisvalue
        tconstraint += [obj + [1] + [currentmax]] #make sure it is less than the current maximum (so we don't repeat work)
        #now we check for an integer solution
        endresult = autoSolver(tconstraint,Qorbit)
        if endresult[0] == 'No Integer Solution': #this is too tight of a bound
            currentmax = temptarg-1 #there is nothing with a max greater than this so we can set it to the middle

        if endresult[0] == 'Integer Solution Discovered':
            
            currentmin = temptarg+1 #we expect at least this much performance so something slightly tighter is given
            currentsolution = endresult[1:] #grab the value array along with objective value
            print [currentsolution,temptarg]
Example #8
def get(src, dest):
    "Copies from source to dest, handles hadoop get if src prefixed with hdfs://"
    from hadoop import hdfs
    if hdfs.isHdfs(src):
        hdfs.get(src, dest)
    else:
        copy(src, dest)
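A hedged usage sketch: hadoop.hdfs is the project's own module, copy is assumed to behave like shutil.copy, and the paths are hypothetical:

get('hdfs://namenode/data/part-00000', '/tmp/part-00000')   # dispatched to hdfs.get
get('/etc/hosts', '/tmp/hosts')                             # plain local copy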
Example #9
 def PairedPermutation(self, x, y):
     """
     This method performs a permutation test for matched samples upon 2 
     data vectors. This code was modified from Segal.
     Usage: PairedPermutation(x,y)
     Returns: utail, nperm, crit, prob
     """
     self.utail = 0
     self.nperm = 0
     self.crit = 0.0
     d = []
     d.append(copy(x))
     d.append(copy(x))
     d.append(copy(y))
     index = [1]*self.d1.N
     for i in range(self.d1.N):
         d[1][i] = x[i]-y[i]
         d[2][i] = y[i]-x[i]
         self.crit = self.crit + d[1][i]
     #for j in range((self.d1.N-1), 0, -1):
     while 1:
         sum = 0
         for i in range(self.d1.N):
             sum = sum + d[index[i]][i]
         self.nperm = self.nperm + 1
         if (sum >= self.crit):
             self.utail = self.utail + 1
         # advance index like a binary counter over positions 1..N-1; once
         # every position wraps back to 1, all sign patterns have been visited
         for i in range((self.d1.N-1), 0, -1):
             if (index[i] == 1):
                 index[i] = 2
                 break
             index[i] = 1
         else:
             break
     self.prob = float(self.utail) / self.nperm
     return self.prob
Example #10
def Euler(a,b,x0=1,t0=0,tn=1,n=200):
    x,t =symbols('x t')
    T=tn-t0
    nt=n
    dt = float(T)/nt   # force true division even on Python 2
    sqrtdt = sqrt(dt)
    #t=array([])
    t = linspace(t0, tn, nt+1)    # nt+1 points between t0 and tn
    #e=ones([1,size(t)])
    
    X=0.*copy(t)
    #y=0.*copy(t)
    dw=0.*copy(t)
    
    X[0]=x0
    #y[0]=x0
    for j in range(0, nt):
        #dw.append(random.gauss(0, sqrtdt))
        #dw[j]= gauss(0, sqrtdt)
        dw[j]=random.randn()*sqrtdt
        #y[j+1]= y[j]+(A+B*y[j])*dt
        if diff(a,x)==0:
           ax=a
        else:
           ax=a.subs({x:X[j]})
        if diff(b,x)==0:
           bx=b
        else:
           bx=b.subs({x:X[j]})   
        X[j+1]= X[j]+ax*dt+bx*dw[j]
    return X
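This is the Euler-Maruyama scheme for the SDE dX = a(X) dt + b(X) dW. A usage sketch under the assumption that the snippet's names (symbols, diff, sqrt, linspace, random, copy) come from sympy, numpy, and the copy module; the coefficients are illustrative:

from sympy import symbols

x, t = symbols('x t')
# geometric Brownian motion: dX = 0.5*X dt + 0.2*X dW, starting at X(0) = 1
path = Euler(0.5*x, 0.2*x, x0=1, t0=0, tn=1, n=200)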
Example #11
File: c.py Project: issy-kn/ALDS1
def dfs(depth, prev):
    global state, path
    if state.MD == 0:
        return True
    # prune this branch when the current depth plus the Manhattan-distance heuristic exceeds the limit
    if depth + state.MD > limit:
        return False

    sx = state.space / N
    sy = state.space % N
    tmp = Puzzle()

    for r in xrange(4):
        tx = sx + dx[r]
        ty = sy + dy[r]
        if tx < 0 or ty < 0 or tx >= N or ty >= N:
            continue
        if max(prev, r) - min(prev, r) == 2:
            continue
        tmp = copy(state)
        # swap the pieces while updating the Manhattan-distance delta
        txy = tx * N + ty
        sxy = sx * N + sy
        state.MD -= MDT[txy][state.f[txy] - 1]
        state.MD += MDT[sxy][state.f[txy] - 1]
        state.f[txy], state.f[sxy] = state.f[sxy], state.f[txy]
        state.space = txy
        if dfs(depth + 1, r):
            path[depth] = r
            return True
        state = copy(tmp)

    return False
Example #12
 def state_push(self):
     """
     Save the current state of the output function to an internal stack.
     """
    
     self.__current_state_stack.append((copy(self.t), copy(self.y_avg), copy(self.first_call)))
     super(SimpleHomeoLinear, self).state_push()
Example #13
	def prevision(self, gravite, tick, iterations):
		if not self.previsions:
			self.previsions = [copy(self)]
		for i in range(len(self.previsions), iterations):
			prevision = copy(self.previsions[-1])
			prevision.sim(gravite, tick)
			self.previsions.append(prevision)
Example #14
    def test_COPY_verb_with_storlets(self):
        source = '/v1/AUTH_a/c/so'
        target = '/v1/AUTH_a/c/to'
        destination = 'c/to'
        self.app.register('GET', source, HTTPOk, body='source body')
        self.app.register('PUT', target, HTTPCreated)
        storlet = '/v1/AUTH_a/storlet/Storlet-1.0.jar'
        self.app.register('GET', storlet, HTTPOk, body='jar binary')

        def copy(target, source, destination):
            req = Request.blank(source, environ={'REQUEST_METHOD': 'COPY'},
                                headers={'Destination': destination,
                                         'X-Run-Storlet': 'Storlet-1.0.jar',
                                         'X-Backend-Storage-Policy-Index': 0})
            app = self.get_app(self.app, self.conf)
            app(req.environ, self.start_response)
            self.assertEqual('201 Created', self.got_statuses[-1])
            get_calls = self.app.get_calls('GET', source)
            self.assertEqual(len(get_calls), 1)
            self.assertEqual(get_calls[-1][3], '')
            self.assertEqual(get_calls[-1][1], source)
            put_calls = self.app.get_calls('PUT', target)
            self.assertEqual(len(put_calls), 1)
            self.assertEqual(put_calls[-1][3], 'source body')
        with storlet_enabled():
            copy(target, source, destination)
Example #15
    def test01_init(self):
        "Testing LayerMapping initialization."

        # Model field that does not exist.
        bad1 = copy(city_mapping)
        bad1['foobar'] = 'FooField'

        # Shapefile field that does not exist.
        bad2 = copy(city_mapping)
        bad2['name'] = 'Nombre'

        # Nonexistent geographic field type.
        bad3 = copy(city_mapping)
        bad3['point'] = 'CURVE'

        # Incrementing through the bad mapping dictionaries and
        # ensuring that a LayerMapError is raised.
        for bad_map in (bad1, bad2, bad3):
            try:
                lm = LayerMapping(City, city_shp, bad_map)
            except LayerMapError:
                pass
            else:
                self.fail('Expected a LayerMapError.')

        # A LookupError should be thrown for bogus encodings.
        try:
            lm = LayerMapping(City, city_shp, city_mapping, encoding='foobar')
        except LookupError:
            pass
        else:
            self.fail('Expected a LookupError')
Example #16
 def __deepcopy__(self, memo):
     res = self.__class__()
     res.module = self.module
     res.names = copy(self.names)
     res.last_name = self.last_name
     res.attrs = copy(self.attrs)
     return res
Example #17
 def executeAlgorithm(self):
     self.predictedData = []
     
     for index, sample in enumerate(self.inputData):
         if sample.time < self.predictionInterval:
             # Set initial values
             predictionSample = PredictionSample()
             predictionSample.sample = copy(sample)
             predictionSample.velocity = Vector()
             self.predictedData.append( predictionSample )
         else:
             shift = index - self.predictionInterval / self.samplingInterval
             
             # Calculate velocity
             deltaPosition = self.inputData[index].position - \
                             self.inputData[shift].position
             deltaTime = self.inputData[index].time - \
                         self.inputData[shift].time
             invDeltaTimeVector = Vector( 1 / float(deltaTime), \
                                          1 / float(deltaTime), \
                                          1 / float(deltaTime))
             velocity = deltaPosition * invDeltaTimeVector
                            
             # Populate data structures  
             predictionSample = PredictionSample()            
             predictionSample.sample = copy(sample)
             predictionSample.velocity = velocity
             self.predictedData.append( predictionSample )
             
Example #18
def ENV_NO_DISPLAY(env=None):
    if env is None:
        env = copy(ENV)
    else:
        env = copy(env)
    env["DISPLAY"] = "-999:-99.-99"
    return env
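A hedged usage sketch (ENV is assumed to be a module-level dict of environment variables and copy to be copy.copy; the command line is hypothetical):

import subprocess

env = ENV_NO_DISPLAY()   # a copy of ENV whose DISPLAY points at a non-existent server
subprocess.call(['wine', 'installer.exe'], env=env)   # hypothetical headless invocation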
Example #19
 def __init__( self  , Nthreads , Contrainte, FonctionObject , Ngen , Nbits):
     self.Contrainte = Contrainte
     self.FonctionObject = FonctionObject
     self.Nbits = Nbits
     self.lock = Lock()
     self.lockFiltrate = Lock()
     self.dominated = []
     self.CodeNondominated = []
     self.Nondominated = []        
     self.Nthreads = Nthreads
     self.threads = []
     self.Qlist = []
     self.Qoom = Queue()
     self.Ngen = Ngen
     i = 0
     while i < self.Nthreads :
         qm = QMOO( Contrainte , FonctionObject ,  Nbits)
         self.Qlist.append(copy(qm))
         i = i +1
     i = 0
     while i < self.Nthreads :
         thred = Thread(target = self.RunOnethread,args =() )
         self.threads.append(copy(thred))
         i = i+1
     i = 0
     while i < len( self.Qlist ):
         self.Qlist[i].Evaluate( self.dominated, self.CodeNondominated,self.Nondominated,1 ,self.Qlist[i].Population )
         self.Qoom.put(copy(self.Qlist[i]))
         i = i+1
Example #20
def shuffle_function(*list_object):
    if len(list_object) == 1 and is_tuple_or_list(list_object[0]):
        result_list = list(copy(list_object[0]))
    else:
        result_list = list(copy(list_object))
    shuffle(result_list)
    return result_list
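A usage sketch, assuming copy is copy.copy, shuffle is random.shuffle, and is_tuple_or_list is a project helper along the lines of isinstance(x, (tuple, list)):

shuffle_function([1, 2, 3, 4])   # a single list argument: returns a shuffled copy of it
shuffle_function(1, 2, 3, 4)     # varargs: returns the arguments as one shuffled list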
Example #21
 def moveDebugPackage(package):
     # Move all debug packages into packages-debug/ and clean them
     # from WorkDir.
     logger.info("*** Moving debug package '%s' to packages-debug" % package)
     if exists(join(config.workDir, package)):
         copy(join(config.workDir, package), config.debugPath)
         remove(join(config.workDir, package))
Example #22
 def PairedPermutation(self, x, y):
     self.utail = 0
     self.nperm = 0
     self.crit = 0.0
     d = []
     d.append(copy(x))
     d.append(copy(x))
     d.append(copy(y))
     index = [1]*self.d1.N
     for i in range(self.d1.N):
         d[1][i] = x[i]-y[i]
         d[2][i] = y[i]-x[i]
         self.crit = self.crit + d[1][i]
     #for j in range((self.d1.N-1), 0, -1):
     while 1:
         sum = 0
         for i in range(self.d1.N):
             sum = sum + d[index[i]][i]
         self.nperm = self.nperm + 1
         if (sum >= self.crit):
             self.utail = self.utail + 1
         # advance index like a binary counter over positions 1..N-1; once
         # every position wraps back to 1, all sign patterns have been visited
         for i in range((self.d1.N-1), 0, -1):
             if (index[i] == 1):
                 index[i] = 2
                 break
             index[i] = 1
         else:
             break
     self.prob = float(self.utail) / self.nperm
Example #23
	def load(self):
		try:
			logging.info("Trying to load CFG")
			with open(self.cfg_path, mode="r", encoding="utf-8") as f:
				cfg = json.load(f)
				self.opts = copy(cfg)
				#logger.warning("READING:  {0}".format(self.opts) )
		except Exception as ex:
			with open(self.cfg_path, mode="w", encoding="utf-8") as f:
				default_opts = copy(self.DEFAULT_OPTS)

				# sorting keys will mess up your config if you wrote it manually,
				# so it is better left off
				json.dump(default_opts, f, indent=4, sort_keys=False)
				#self.opts.clear()
				self.opts = copy(default_opts)
Example #24
  def newSharks(self, nb_s):  # sharks to be born
    while (nb_s >= len(self.sharks)):
      newsharks = copy(self.sharks)
      for i in xrange(len(newsharks)):
        self.sharks.append(newsharks[i])
      nb_s = nb_s - len(self.sharks)
    fitness = []  # so we can compare the sharks' fitness
    for i, a in enumerate(self.sharks):
      fitness.append(a.fit_reproduction)
    for i in xrange(int(nb_s)):
      fmax = max(fitness)
      new_candidates = []  # their indices
      for j, a in enumerate(self.sharks):
        if (a.fit_reproduction == fmax and a.has_rep == False):
          new_candidates.append(j)
      if (len(new_candidates) > 0):
        # randomly reproduce one of the sharks with the maximal fitness
        random.shuffle(new_candidates)
        fitness[new_candidates[0]] = -1
        self.sharks[new_candidates[0]].has_Reproduce()
        self.sharks.append(copy(self.sharks[new_candidates[0]]))
        self.sharks[-1].toMute()
        self.sharks[-1].has_Reproduce()

    for i, a in enumerate(self.sharks):
      a.reset_Rep()
Example #25
    def parse(self, reader):
        for line in reader.readlines():
            line = line.strip()
            if len(line) == 0: 
                continue

            # List<TaggedWord>
            sentence = copy(self.startMarkers)
            # String[]
            lineParts = re.split("\\s+", line)
            for i in xrange(0, len(lineParts)): 
                # String
                wordTag = lineParts[i]
                # int
                sepIndex = wordTag.rfind('/')
                if sepIndex == -1: 
                    raise CorpusReaderException("Tag is missing in '" + wordTag + "'", CorpusReaderException.CorpusReadError.MISSING_TAG)

                # String
                word = wordTag[:sepIndex]
                # String
                tag = wordTag[sepIndex + 1:]
                if len(word) == 0: 
                    raise CorpusReaderException("Zero-length word in '" + wordTag + "'", CorpusReaderException.CorpusReadError.ZERO_LENGTH_WORD)

                if i == 0: 
                    word = replaceCharAt(word, 0, word[0].lower())

                sentence.append(TaggedWord(word, tag))

            sentence += copy(self.endMarkers)
            self.sentenceHandler.handleSentence(sentence)
Example #26
 def moveDeltaPackage(package):
     # Move all delta packages into packages/ and packages-test/
     # and clean them from workDir.
     logger.info("*** Moving delta package '%s' to both directories" % package)
     if exists(join(config.workDir, package)):
         copy(join(config.workDir, package), config.binaryPath)
         copy(join(config.workDir, package), config.testPath)
         remove(join(config.workDir, package))
Example #27
def ENV_NO_GECKO(env=None):
    """Don't trigger mshtml's gecko install dialog"""
    if env is None:
        env = copy(ENV)
    else:
        env = copy(env)
    env["WINEDLLOVERRIDES"] = "mshtml="
    return env
Example #28
def put(src, dest):
    "Copies from source to dest, handles hadoop put if dest is prefixed with hdfs://"
    from hadoop import hdfs
    if hdfs.isHdfs(dest):
        hdfs.put(src, dest)
    else:
        pathutil.ensure(pathutil.getDirectory(dest))
        copy(src, dest)
Example #29
	def measDelEnergy(self,q,delta):
		d = zeros(len(q))
		for i in range(len(d)):
			qplus = copy(q)
			qminus = copy(q)
			qplus[i] = min(qplus[i]+delta/2,1)
			qminus[i] = max(qminus[i]-delta/2,0)
			d[i] = (self.measEnergy(qplus) - self.measEnergy(qminus))/(qplus[i]-qminus[i])
		return d
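measDelEnergy estimates the gradient of measEnergy by a central difference of width delta in each coordinate, perturbing copies of q (so the input is never mutated) and clamping each probe into [0, 1]. A standalone sketch of the same scheme, with f standing in for self.measEnergy:

from copy import copy
from numpy import zeros

def central_diff_gradient(f, q, delta=1e-3):
    d = zeros(len(q))
    for i in range(len(d)):
        qplus = copy(q)
        qminus = copy(q)
        qplus[i] = min(qplus[i] + delta/2, 1)
        qminus[i] = max(qminus[i] - delta/2, 0)
        d[i] = (f(qplus) - f(qminus)) / (qplus[i] - qminus[i])
    return d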
Example #30
 def conversionGeneratorToLearner(self,transform,bias):
     if(self.generator.transformFamily == self.iLearner.transformFamily):
         newTransform = copy(transform)
         newBias = copy(bias)
         return(newTransform,newBias)
     elif(self.generator.transformFamily == LINEAR):
         return self.linearToLinearIncrement(transform,bias)
     else:
         return self.linearIncrementToLinear(transform,bias)
Example #31
def safe_new(x, tag='', dtype=None):
    """
    Internal function that constructs a new variable from x with the same
    type, but with a different name (old name + tag). This function is used
    by gradient, or the R-op to construct new variables for the inputs of
    the inner graph such that there is no interference between the original
    graph and the newly constructed graph.

    """
    if hasattr(x, 'name') and x.name is not None:
        nw_name = x.name + tag
    else:
        nw_name = None

    if isinstance(x, theano.Constant):
        if dtype and x.dtype != dtype:
            casted_x = x.astype(dtype)
            nwx = x.__class__(casted_x.type, x.data, x.name)
            nwx.tag = copy(x.tag)
            return nwx
        else:
            return x.clone()
    # Note, as_tensor_variable will convert the Scalar into a
    # TensorScalar that will require a ScalarFromTensor op,
    # making the pushout optimization fail
    elif isinstance(x, scalar.ScalarVariable):
        if dtype:
            nw_x = scalar.get_scalar_type(dtype=dtype)()
        else:
            nw_x = x.type()
        nw_x.name = nw_name
        if theano.config.compute_test_value != 'off':
            # Copy test value, cast it if necessary
            try:
                x_test_value = gof.op.get_test_value(x)
            except AttributeError:
                # There is no test value
                pass
            else:
                # This clause is executed if no exception was raised
                nw_x.tag.test_value = nw_x.type.filter(x_test_value)
        return nw_x
    else:
        try:
            x = tensor.as_tensor_variable(x)
        except TypeError:
            # This could happen for example for random states, and I really
            # want to avoid the convoluted logic that checks for cuda
            # ndarrays
            pass

    # Cast x if needed. If x has a test value, this will also cast it.
    if dtype and x.dtype != dtype:
        x = x.astype(dtype)

    nw_x = x.type()
    nw_x.name = nw_name
    # Preserve test values so that the 'compute_test_value' option can be used.
    # The test value is deep-copied to ensure there can be no interactions
    # between test values, due to inplace operations for instance. This may
    # not be the most efficient memory-wise, though.
    if theano.config.compute_test_value != 'off':
        try:
            nw_x.tag.test_value = copy.deepcopy(gof.op.get_test_value(x))
        except AttributeError:
            # This means `x` has no test value.
            pass

    return nw_x
Example #32
    def edit(self, spec, prefix):
        self.consistency_check(spec)

        pkgconf = which('pkg-config')

        if '^fftw' in spec:
            fftw = spec['fftw:openmp' if '+openmp' in spec else 'fftw']
            fftw_header_dir = fftw.headers.directories[0]
        elif '^intel-mkl' in spec:
            fftw = spec['intel-mkl']
            fftw_header_dir = fftw.headers.directories[0] + '/fftw'
        elif '^intel-parallel-studio+mkl' in spec:
            fftw = spec['intel-parallel-studio']
            fftw_header_dir = fftw.headers.directories[0] + '/fftw'

        optimization_flags = {
            'gcc': [
                '-O2',
                '-funroll-loops',
                '-ftree-vectorize',
            ],
            'intel': [
                '-O2',
                '-pc64',
                '-unroll',
            ],
            'pgi': ['-fast'],
            'cray': ['-O2'],
            'xl': ['-O3'],
        }

        dflags = ['-DNDEBUG']
        cppflags = [
            '-D__LIBINT',
            '-D__FFTW3',
            '-I{0}'.format(fftw_header_dir),
        ]

        if '@:6.9' in spec:
            cppflags += [
                '-D__LIBINT_MAX_AM=6',
                '-D__LIBDERIV_MAX_AM1=5',
            ]

        if '^mpi@3:' in spec:
            cppflags.append('-D__MPI_VERSION=3')
        elif '^mpi@2:' in spec:
            cppflags.append('-D__MPI_VERSION=2')

        cflags = optimization_flags[self.spec.compiler.name][:]
        cxxflags = optimization_flags[self.spec.compiler.name][:]
        fcflags = optimization_flags[self.spec.compiler.name][:]
        nvflags = ['-O3']
        ldflags = []
        libs = []
        gpuver = ''

        if '%intel' in spec:
            cflags.append('-fp-model precise')
            cxxflags.append('-fp-model precise')
            fcflags += [
                '-fp-model precise',
                '-heap-arrays 64',
                '-g',
                '-traceback',
            ]
        elif '%gcc' in spec:
            fcflags += [
                '-ffree-form',
                '-ffree-line-length-none',
                '-ggdb',  # make sure we get proper Fortran backtraces
            ]
        elif '%pgi' in spec:
            fcflags += ['-Mfreeform', '-Mextend']
        elif '%cray' in spec:
            fcflags += ['-emf', '-ffree', '-hflex_mp=strict']
        elif '%xl' in spec:
            fcflags += ['-qpreprocess', '-qstrict', '-q64']
            ldflags += ['-Wl,--allow-multiple-definition']

        if '+openmp' in spec:
            cflags.append(self.compiler.openmp_flag)
            cxxflags.append(self.compiler.openmp_flag)
            fcflags.append(self.compiler.openmp_flag)
            ldflags.append(self.compiler.openmp_flag)
            nvflags.append('-Xcompiler="{0}"'.format(
                self.compiler.openmp_flag))
        elif '%cray' in spec:  # Cray enables OpenMP by default
            cflags += ['-hnoomp']
            cxxflags += ['-hnoomp']
            fcflags += ['-hnoomp']
            ldflags += ['-hnoomp']

        if '@7:' in spec:  # recent versions of CP2K use C++14 CUDA code
            cxxflags.append(self.compiler.cxx14_flag)
            nvflags.append(self.compiler.cxx14_flag)

        ldflags.append(fftw.libs.search_flags)

        if '[email protected]' in spec:
            ldflags.insert(0, '-Wl,--allow-multiple-definition')

        if '@:6.9' in spec:
            # libint-1.x.y has to be linked statically to work around
            # inconsistencies in its Fortran interface definition
            # (short-int vs int) which otherwise causes segfaults at runtime
            # due to wrong offsets into the shared library symbols.
            libs.extend([
                os.path.join(spec['libint'].libs.directories[0], 'libderiv.a'),
                os.path.join(spec['libint'].libs.directories[0], 'libint.a'),
            ])
        else:
            fcflags += pkgconf('--cflags', 'libint2', output=str).split()
            libs += pkgconf('--libs', 'libint2', output=str).split()

        if '+plumed' in self.spec:
            dflags.extend(['-D__PLUMED2'])
            cppflags.extend(['-D__PLUMED2'])
            libs.extend([
                os.path.join(self.spec['plumed'].prefix.lib,
                             'libplumed.{0}'.format(dso_suffix))
            ])

        cc = spack_cc if '~mpi' in spec else spec['mpi'].mpicc
        cxx = spack_cxx if '~mpi' in spec else spec['mpi'].mpicxx
        fc = spack_fc if '~mpi' in spec else spec['mpi'].mpifc

        # Intel
        if '%intel' in spec:
            cppflags.extend([
                '-D__INTEL',
                '-D__HAS_ISO_C_BINDING',
                '-D__USE_CP2K_TRACE',
            ])
            fcflags.extend(
                ['-diag-disable 8290,8291,10010,10212,11060', '-free', '-fpp'])

        # FFTW, LAPACK, BLAS
        lapack = spec['lapack'].libs
        blas = spec['blas'].libs
        ldflags.append((lapack + blas).search_flags)
        libs.extend([str(x) for x in (fftw.libs, lapack, blas)])

        if '^intel-mkl' in spec or '^intel-parallel-studio+mkl' in spec:
            cppflags += ['-D__MKL']
        elif '^accelerate' in spec:
            cppflags += ['-D__ACCELERATE']

        if '+cosma' in spec:
            # add before ScaLAPACK to override the p?gemm symbols
            cosma = spec['cosma'].libs
            ldflags.append(cosma.search_flags)
            libs.extend(cosma)

        # MPI
        if '+mpi' in spec:
            cppflags.extend(['-D__parallel', '-D__SCALAPACK'])

            scalapack = spec['scalapack'].libs
            ldflags.append(scalapack.search_flags)

            libs.extend(scalapack)
            libs.extend(spec['mpi:cxx'].libs)
            libs.extend(self.compiler.stdcxx_libs)

            if 'wannier90' in spec:
                cppflags.append('-D__WANNIER90')
                wannier = os.path.join(spec['wannier90'].libs.directories[0],
                                       'libwannier.a')
                libs.append(wannier)

        if '+libxc' in spec:
            cppflags += ['-D__LIBXC']

            if '@:6.9' in spec:
                libxc = spec['libxc:fortran,static']
                cppflags += [libxc.headers.cpp_flags]
                ldflags.append(libxc.libs.search_flags)
                libs.append(str(libxc.libs))
            else:
                fcflags += pkgconf('--cflags', 'libxcf03', output=str).split()
                libs += pkgconf('--libs', 'libxcf03', output=str).split()

        if '+pexsi' in spec:
            cppflags.append('-D__LIBPEXSI')
            fcflags.append('-I' +
                           os.path.join(spec['pexsi'].prefix, 'fortran'))
            libs.extend([
                os.path.join(spec['pexsi'].libs.directories[0], 'libpexsi.a'),
                os.path.join(spec['superlu-dist'].libs.directories[0],
                             'libsuperlu_dist.a'),
                os.path.join(spec['parmetis'].libs.directories[0],
                             'libparmetis.{0}'.format(dso_suffix)),
                os.path.join(spec['metis'].libs.directories[0],
                             'libmetis.{0}'.format(dso_suffix)),
            ])

        if '+elpa' in spec:
            elpa = spec['elpa']
            elpa_suffix = '_openmp' if '+openmp' in elpa else ''
            elpa_incdir = elpa.headers.directories[0]

            fcflags += ['-I{0}'.format(os.path.join(elpa_incdir, 'modules'))]
            libs.append(
                os.path.join(
                    elpa.libs.directories[0],
                    ('libelpa{elpa_suffix}.{dso_suffix}'.format(
                        elpa_suffix=elpa_suffix, dso_suffix=dso_suffix))))

            if spec.satisfies('@:4.999'):
                if elpa.satisfies('@:2014.5.999'):
                    cppflags.append('-D__ELPA')
                elif elpa.satisfies('@2014.6:2015.10.999'):
                    cppflags.append('-D__ELPA2')
                else:
                    cppflags.append('-D__ELPA3')
            else:
                cppflags.append('-D__ELPA={0}{1:02d}'.format(
                    elpa.version[0], int(elpa.version[1])))
                fcflags += ['-I{0}'.format(os.path.join(elpa_incdir, 'elpa'))]

        if spec.satisfies('+sirius'):
            sirius = spec['sirius']
            cppflags.append('-D__SIRIUS')
            fcflags += ['-I{0}'.format(os.path.join(sirius.prefix, 'fortran'))]
            libs += list(sirius.libs)

        if spec.satisfies('+cuda'):
            cppflags += ['-D__ACC']
            libs += ['-lcudart', '-lnvrtc', '-lcuda']

            if spec.satisfies('+cuda_blas'):
                cppflags += ['-D__DBCSR_ACC=2']
                libs += ['-lcublas']
            else:
                cppflags += ['-D__DBCSR_ACC']

            if spec.satisfies('+cuda_fft'):
                cppflags += ['-D__PW_CUDA']
                libs += ['-lcufft', '-lcublas']

            cuda_arch = spec.variants['cuda_arch'].value
            if cuda_arch:
                gpuver = {
                    '35': 'K40',
                    '37': 'K80',
                    '60': 'P100',
                    '70': 'V100',
                }[cuda_arch]

                if (cuda_arch == '35'
                        and spec.satisfies('+cuda_arch_35_k20x')):
                    gpuver = 'K20X'

        if 'smm=libsmm' in spec:
            lib_dir = os.path.join('lib', self.makefile_architecture,
                                   self.makefile_version)
            mkdirp(lib_dir)
            try:
                copy(env['LIBSMM_PATH'], os.path.join(lib_dir, 'libsmm.a'))
            except KeyError:
                raise KeyError('Point environment variable LIBSMM_PATH to '
                               'the absolute path of the libsmm.a file')
            except IOError:
                raise IOError('The file LIBSMM_PATH pointed to does not '
                              'exist. Note that it must be absolute path.')
            cppflags.extend([
                '-D__HAS_smm_dnn',
                '-D__HAS_smm_vec',
            ])
            libs.append('-lsmm')

        elif 'smm=libxsmm' in spec:
            cppflags += ['-D__LIBXSMM']
            cppflags += pkgconf('--cflags-only-other', 'libxsmmf',
                                output=str).split()
            fcflags += pkgconf('--cflags-only-I', 'libxsmmf',
                               output=str).split()
            libs += pkgconf('--libs', 'libxsmmf', output=str).split()

        dflags.extend(cppflags)
        cflags.extend(cppflags)
        cxxflags.extend(cppflags)
        fcflags.extend(cppflags)
        nvflags.extend(cppflags)

        with open(self.makefile, 'w') as mkf:
            if '+plumed' in spec:
                mkf.write('# include Plumed.inc as recommended by '
                          'PLUMED to include libraries and flags\n')
                mkf.write('include {0}\n'.format(
                    spec['plumed'].package.plumed_inc))

            mkf.write('\n# COMPILER, LINKER, TOOLS\n\n')
            mkf.write('FC  = {0}\n'
                      'CC  = {1}\n'
                      'CXX = {2}\n'
                      'LD  = {3}\n'.format(fc, cc, cxx, fc))

            if '%intel' in spec:
                intel_bin_dir = ancestor(self.compiler.cc)
                # CPP is a commented command in Intel arch of CP2K
                # This is the hack through which cp2k developers avoid doing :
                #
                # ${CPP} <file>.F > <file>.f90
                #
                # and use `-fpp` instead
                mkf.write('CPP = # {0} -P\n'.format(spack_cc))
                mkf.write('AR  = {0}/xiar -r\n'.format(intel_bin_dir))
            else:
                mkf.write('CPP = # {0} -E\n'.format(spack_cc))
                mkf.write('AR  = ar -r\n')

            if spec.satisfies('+cuda'):
                mkf.write('NVCC = {0}\n'.format(
                    os.path.join(spec['cuda'].prefix, 'bin', 'nvcc')))

            # Write compiler flags to file
            def fflags(var, lst):
                return '{0} = {1}\n\n'.format(var, ' \\\n\t'.join(lst))

            mkf.write('\n# FLAGS & LIBRARIES\n')
            mkf.write(fflags('DFLAGS', dflags))
            mkf.write(fflags('CPPFLAGS', cppflags))
            mkf.write(fflags('CFLAGS', cflags))
            mkf.write(fflags('CXXFLAGS', cxxflags))
            mkf.write(fflags('NVFLAGS', nvflags))
            mkf.write(fflags('FCFLAGS', fcflags))
            mkf.write(fflags('LDFLAGS', ldflags))
            mkf.write(fflags('LIBS', libs))

            if '%intel' in spec:
                mkf.write(fflags('LDFLAGS_C', ldflags + ['-nofor_main']))

            mkf.write('# CP2K-specific flags\n\n')
            mkf.write('GPUVER = {0}\n'.format(gpuver))
            mkf.write('DATA_DIR = {0}\n'.format(self.prefix.share.data))
Example #33
 def copy(self):
     return copy(self)
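This one-liner works because the bare name copy inside the method body resolves through the module's global scope (where copy.copy was presumably imported), not through the class, so the method does not shadow the function it calls. A small self-contained sketch of the pattern:

from copy import copy

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

    def copy(self):
        # resolves to the module-level copy.copy, not to this method
        return copy(self)

p = Point(1, 2)
q = p.copy()
assert q is not p and (q.x, q.y) == (1, 2)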
Example #34
    def edit(self, spec, prefix):
        pkgconf = which('pkg-config')

        if '^fftw' in spec:
            fftw = spec['fftw:openmp' if '+openmp' in spec else 'fftw']
            fftw_header_dir = fftw.headers.directories[0]
        elif '^amdfftw' in spec:
            fftw = spec['amdfftw:openmp' if '+openmp' in spec else 'amdfftw']
            fftw_header_dir = fftw.headers.directories[0]
        elif '^intel-mkl' in spec:
            fftw = spec['intel-mkl']
            fftw_header_dir = fftw.headers.directories[0] + '/fftw'
        elif '^intel-oneapi-mkl' in spec:
            fftw = spec['intel-oneapi-mkl']
            fftw_header_dir = fftw.headers.directories[0] + '/fftw'
        elif '^intel-parallel-studio+mkl' in spec:
            fftw = spec['intel-parallel-studio']
            fftw_header_dir = '<NOTFOUND>'
            for incdir in [
                    join_path(f, 'fftw') for f in fftw.headers.directories
            ]:
                if os.path.exists(incdir):
                    fftw_header_dir = incdir
                    break
        elif '^cray-fftw' in spec:
            fftw = spec['cray-fftw']
            fftw_header_dir = fftw.headers.directories[0]

        optimization_flags = {
            'gcc': [
                '-O2',
                '-funroll-loops',
                '-ftree-vectorize',
            ],
            'intel': [
                '-O2',
                '-pc64',
                '-unroll',
            ],
            'pgi': ['-fast'],
            'nvhpc': ['-fast'],
            'cce': ['-O2'],
            'xl': ['-O3'],
            'aocc': ['-O1'],
        }

        dflags = ['-DNDEBUG']
        cppflags = [
            '-D__FFTW3',
            '-I{0}'.format(fftw_header_dir),
        ]

        if '^mpi@3:' in spec:
            cppflags.append('-D__MPI_VERSION=3')
        elif '^mpi@2:' in spec:
            cppflags.append('-D__MPI_VERSION=2')

        cflags = optimization_flags[self.spec.compiler.name][:]
        cxxflags = optimization_flags[self.spec.compiler.name][:]
        fcflags = optimization_flags[self.spec.compiler.name][:]
        nvflags = ['-O3']
        ldflags = []
        libs = []
        gpuver = ''

        if '%intel' in spec:
            cflags.append('-fp-model precise')
            cxxflags.append('-fp-model precise')
            fcflags += [
                '-fp-model precise',
                '-heap-arrays 64',
                '-g',
                '-traceback',
            ]
        elif '%gcc' in spec:
            fcflags += [
                '-ffree-form',
                '-ffree-line-length-none',
                '-ggdb',  # make sure we get proper Fortran backtraces
            ]
        elif '%aocc' in spec:
            fcflags += [
                '-ffree-form',
                '-Mbackslash',
            ]
        elif '%pgi' in spec or '%nvhpc' in spec:
            fcflags += ['-Mfreeform', '-Mextend']
        elif '%cce' in spec:
            fcflags += ['-emf', '-ffree', '-hflex_mp=strict']
        elif '%xl' in spec:
            fcflags += ['-qpreprocess', '-qstrict', '-q64']
            ldflags += ['-Wl,--allow-multiple-definition']

        if '%gcc@10: +mpi' in spec and spec['mpi'].name in [
                'mpich', 'cray-mpich'
        ]:
            fcflags += ['-fallow-argument-mismatch'
                        ]  # https://github.com/pmodels/mpich/issues/4300

        if '+openmp' in spec:
            cflags.append(self.compiler.openmp_flag)
            cxxflags.append(self.compiler.openmp_flag)
            fcflags.append(self.compiler.openmp_flag)
            ldflags.append(self.compiler.openmp_flag)
            nvflags.append('-Xcompiler="{0}"'.format(
                self.compiler.openmp_flag))
        elif '%cce' in spec:  # Cray enables OpenMP by default
            cflags += ['-hnoomp']
            cxxflags += ['-hnoomp']
            fcflags += ['-hnoomp']
            ldflags += ['-hnoomp']

        if '@7:' in spec:  # recent versions of CP2K use C++14 CUDA code
            cxxflags.append(self.compiler.cxx14_flag)
            nvflags.append(self.compiler.cxx14_flag)

        ldflags.append(fftw.libs.search_flags)

        if '[email protected]' in spec:
            ldflags.insert(0, '-Wl,--allow-multiple-definition')

        if '+plumed' in self.spec:
            dflags.extend(['-D__PLUMED2'])
            cppflags.extend(['-D__PLUMED2'])
            libs.extend([
                join_path(self.spec['plumed'].prefix.lib,
                          'libplumed.{0}'.format(dso_suffix))
            ])

        cc = spack_cc if '~mpi' in spec else spec['mpi'].mpicc
        cxx = spack_cxx if '~mpi' in spec else spec['mpi'].mpicxx
        fc = spack_fc if '~mpi' in spec else spec['mpi'].mpifc

        # Intel
        if '%intel' in spec:
            cppflags.extend([
                '-D__INTEL',
                '-D__HAS_ISO_C_BINDING',
                '-D__USE_CP2K_TRACE',
            ])
            fcflags.extend(
                ['-diag-disable 8290,8291,10010,10212,11060', '-free', '-fpp'])

        # FFTW, LAPACK, BLAS
        lapack = spec['lapack'].libs
        blas = spec['blas'].libs
        ldflags.append((lapack + blas).search_flags)
        libs.extend([str(x) for x in (fftw.libs, lapack, blas)])

        if any(p in spec for p in ('^intel-mkl', '^intel-parallel-studio+mkl',
                                   '^intel-oneapi-mkl')):
            cppflags += ['-D__MKL']
        elif '^accelerate' in spec:
            cppflags += ['-D__ACCELERATE']

        if '+cosma' in spec:
            # add before ScaLAPACK to override the p?gemm symbols
            cosma = spec['cosma'].libs
            ldflags.append(cosma.search_flags)
            libs.extend(cosma)

        # MPI
        if '+mpi' in spec:
            cppflags.extend(['-D__parallel', '-D__SCALAPACK'])

            if '^intel-oneapi-mpi' in spec:
                mpi = [
                    join_path(spec['intel-oneapi-mpi'].libs.directories[0],
                              'libmpi.so')
                ]
            else:
                mpi = spec['mpi:cxx'].libs

            # while intel-mkl has a mpi variant and adds the scalapack
            # libs to its libs, intel-oneapi-mkl does not.
            if '^intel-oneapi-mkl' in spec:
                mpi_impl = 'openmpi' if '^openmpi' in spec else 'intelmpi'
                scalapack = [
                    join_path(spec['intel-oneapi-mkl'].libs.directories[0],
                              'libmkl_scalapack_lp64.so'),
                    join_path(spec['intel-oneapi-mkl'].libs.directories[0],
                              'libmkl_blacs_{0}_lp64.so'.format(mpi_impl))
                ]
            else:
                scalapack = spec['scalapack'].libs
                ldflags.append(scalapack.search_flags)

            libs.extend(scalapack)
            libs.extend(mpi)
            libs.extend(self.compiler.stdcxx_libs)

            if 'wannier90' in spec:
                cppflags.append('-D__WANNIER90')
                wannier = join_path(spec['wannier90'].libs.directories[0],
                                    'libwannier.a')
                libs.append(wannier)

        if '+libint' in spec:
            cppflags += ['-D__LIBINT']

            if '@:6.9' in spec:
                cppflags += [
                    '-D__LIBINT_MAX_AM=6',
                    '-D__LIBDERIV_MAX_AM1=5',
                ]

                # libint-1.x.y has to be linked statically to work around
                # inconsistencies in its Fortran interface definition
                # (short-int vs int) which otherwise causes segfaults at
                # runtime due to wrong offsets into the shared library
                # symbols.
                libs.extend([
                    join_path(spec['libint'].libs.directories[0],
                              'libderiv.a'),
                    join_path(spec['libint'].libs.directories[0], 'libint.a'),
                ])
            else:
                fcflags += pkgconf('--cflags', 'libint2', output=str).split()
                libs += pkgconf('--libs', 'libint2', output=str).split()

        if '+libxc' in spec:
            cppflags += ['-D__LIBXC']

            if '@:6.9' in spec:
                libxc = spec['libxc:fortran,static']
                cppflags += [libxc.headers.cpp_flags]
                ldflags.append(libxc.libs.search_flags)
                libs.append(str(libxc.libs))
            else:
                fcflags += pkgconf('--cflags', 'libxcf03', output=str).split()
                # some Fortran functions seem to be direct wrappers of the
                # C functions such that we get a direct dependency on them,
                # requiring `-lxc` to be present in addition to `-lxcf03`
                libs += pkgconf('--libs', 'libxcf03', 'libxc',
                                output=str).split()

        if '+pexsi' in spec:
            cppflags.append('-D__LIBPEXSI')
            fcflags.append('-I' + join_path(spec['pexsi'].prefix, 'fortran'))
            libs.extend([
                join_path(spec['pexsi'].libs.directories[0], 'libpexsi.a'),
                join_path(spec['superlu-dist'].libs.directories[0],
                          'libsuperlu_dist.a'),
                join_path(spec['parmetis'].libs.directories[0],
                          'libparmetis.{0}'.format(dso_suffix)),
                join_path(spec['metis'].libs.directories[0],
                          'libmetis.{0}'.format(dso_suffix)),
            ])

        if '+elpa' in spec:
            elpa = spec['elpa']
            elpa_suffix = '_openmp' if '+openmp' in elpa else ''
            elpa_incdir = elpa.headers.directories[0]

            fcflags += ['-I{0}'.format(join_path(elpa_incdir, 'modules'))]

            # Currently AOCC support only static libraries of ELPA
            if '%aocc' in spec:
                libs.append(
                    join_path(elpa.prefix.lib,
                              ('libelpa{elpa_suffix}.a'.format(
                                  elpa_suffix=elpa_suffix))))
            else:
                libs.append(
                    join_path(
                        elpa.libs.directories[0],
                        ('libelpa{elpa_suffix}.{dso_suffix}'.format(
                            elpa_suffix=elpa_suffix, dso_suffix=dso_suffix))))

            if spec.satisfies('@:4'):
                if elpa.satisfies('@:2014.5'):
                    cppflags.append('-D__ELPA')
                elif elpa.satisfies('@2014.6:2015.10'):
                    cppflags.append('-D__ELPA2')
                else:
                    cppflags.append('-D__ELPA3')
            else:
                cppflags.append('-D__ELPA={0}{1:02d}'.format(
                    elpa.version[0], int(elpa.version[1])))
                fcflags += ['-I{0}'.format(join_path(elpa_incdir, 'elpa'))]

            if '+cuda' in spec and '+cuda' in elpa:
                cppflags += ['-D__ELPA_NVIDIA_GPU']

        if spec.satisfies('+sirius'):
            sirius = spec['sirius']
            cppflags.append('-D__SIRIUS')
            fcflags += ['-I{0}'.format(sirius.prefix.include.sirius)]
            libs += list(sirius.libs)

        if spec.satisfies('+cuda'):
            libs += [
                '-L{}'.format(spec['cuda'].libs.directories[0]),
                '-L{}/stubs'.format(spec['cuda'].libs.directories[0]),
                '-lcuda', '-lcudart', '-lnvrtc', '-lstdc++'
            ]

            if spec.satisfies('@9:'):
                acc_compiler_var = 'OFFLOAD_CC'
                acc_flags_var = 'OFFLOAD_FLAGS'
                cppflags += [
                    '-D__DBCSR_ACC',
                    '-D__GRID_CUDA',
                    '-DOFFLOAD_TARGET=cuda',
                ]
                libs += ['-lcublas']
            else:
                acc_compiler_var = 'NVCC'
                acc_flags_var = 'NVFLAGS'
                cppflags += ['-D__ACC']
                if spec.satisfies('+cuda_blas'):
                    cppflags += ['-D__DBCSR_ACC=2']
                    libs += ['-lcublas']
                else:
                    cppflags += ['-D__DBCSR_ACC']

            if spec.satisfies('+cuda_fft'):
                cppflags += ['-D__PW_CUDA']
                libs += ['-lcufft', '-lcublas']

            cuda_arch = spec.variants['cuda_arch'].value[0]
            if cuda_arch:
                gpuver = {
                    '35': 'K40',
                    '37': 'K80',
                    '60': 'P100',
                    '70': 'V100',
                }[cuda_arch]

                if (cuda_arch == '35'
                        and spec.satisfies('+cuda_arch_35_k20x')):
                    gpuver = 'K20X'

        if 'smm=libsmm' in spec:
            lib_dir = join_path('lib', self.makefile_architecture,
                                self.makefile_version)
            mkdirp(lib_dir)
            try:
                copy(env['LIBSMM_PATH'], join_path(lib_dir, 'libsmm.a'))
            except KeyError:
                raise KeyError('Point environment variable LIBSMM_PATH to '
                               'the absolute path of the libsmm.a file')
            except IOError:
                raise IOError('The file LIBSMM_PATH pointed to does not '
                              'exist. Note that it must be absolute path.')
            cppflags.extend([
                '-D__HAS_smm_dnn',
                '-D__HAS_smm_vec',
            ])
            libs.append('-lsmm')

        elif 'smm=libxsmm' in spec:
            cppflags += ['-D__LIBXSMM']
            cppflags += pkgconf('--cflags-only-other', 'libxsmmf',
                                output=str).split()
            fcflags += pkgconf('--cflags-only-I', 'libxsmmf',
                               output=str).split()
            libs += pkgconf('--libs', 'libxsmmf', output=str).split()

        if '+libvori' in spec:
            cppflags += ['-D__LIBVORI']
            libvori = spec['libvori'].libs
            ldflags += [libvori.search_flags]
            libs += libvori
            libs += ['-lstdc++']

        if '+spglib' in spec:
            cppflags += ['-D__SPGLIB']
            spglib = spec['spglib'].libs
            ldflags += [spglib.search_flags]
            libs += spglib

        dflags.extend(cppflags)
        cflags.extend(cppflags)
        cxxflags.extend(cppflags)
        fcflags.extend(cppflags)
        nvflags.extend(cppflags)

        with open(self.makefile, 'w') as mkf:
            if '+plumed' in spec:
                mkf.write('# include Plumed.inc as recommended by '
                          'PLUMED to include libraries and flags\n')
                mkf.write('include {0}\n'.format(
                    spec['plumed'].package.plumed_inc))

            mkf.write('\n# COMPILER, LINKER, TOOLS\n\n')
            mkf.write('FC  = {0}\n'
                      'CC  = {1}\n'
                      'CXX = {2}\n'
                      'LD  = {3}\n'.format(fc, cc, cxx, fc))

            if '%intel' in spec:
                intel_bin_dir = ancestor(self.compiler.cc)
                # CPP is a commented command in Intel arch of CP2K
                # This is the hack through which cp2k developers avoid doing :
                #
                # ${CPP} <file>.F > <file>.f90
                #
                # and use `-fpp` instead
                mkf.write('CPP = # {0} -P\n'.format(spack_cc))
                mkf.write('AR  = {0}/xiar -r\n'.format(intel_bin_dir))
            else:
                mkf.write('CPP = # {0} -E\n'.format(spack_cc))
                mkf.write('AR  = ar -r\n')

            if '+cuda' in spec:
                mkf.write('{0} = {1}\n'.format(
                    acc_compiler_var,
                    join_path(spec['cuda'].prefix, 'bin', 'nvcc')))

            # Write compiler flags to file
            def fflags(var, lst):
                return '{0} = {1}\n\n'.format(var, ' \\\n\t'.join(lst))

            mkf.write('\n# FLAGS & LIBRARIES\n')
            mkf.write(fflags('DFLAGS', dflags))
            mkf.write(fflags('CPPFLAGS', cppflags))
            mkf.write(fflags('CFLAGS', cflags))
            mkf.write(fflags('CXXFLAGS', cxxflags))
            if '+cuda' in spec:
                mkf.write(fflags(acc_flags_var, nvflags))
            mkf.write(fflags('FCFLAGS', fcflags))
            mkf.write(fflags('LDFLAGS', ldflags))
            mkf.write(fflags('LIBS', libs))

            if '%intel' in spec:
                mkf.write(fflags('LDFLAGS_C', ldflags + ['-nofor-main']))

            mkf.write('# CP2K-specific flags\n\n')
            mkf.write('GPUVER = {0}\n'.format(gpuver))
            mkf.write('DATA_DIR = {0}\n'.format(self.prefix.share.data))
Example #35
    def test04_layermap_unique_multigeometry_fk(self):
        "Testing the `unique`, and `transform`, geometry collection conversion, and ForeignKey mappings."
        # All the following should work.
        try:
            # Telling LayerMapping that we want no transformations performed on the data.
            lm = LayerMapping(County, co_shp, co_mapping, transform=False)

            # Specifying the source spatial reference system via the `source_srs` keyword.
            lm = LayerMapping(County, co_shp, co_mapping, source_srs=4269)
            lm = LayerMapping(County, co_shp, co_mapping, source_srs='NAD83')

            # Unique may take tuple or string parameters.
            for arg in ('name', ('name', 'mpoly')):
                lm = LayerMapping(County,
                                  co_shp,
                                  co_mapping,
                                  transform=False,
                                  unique=arg)
        except:
            self.fail(
                'No exception should be raised for proper use of keywords.')

        # Testing invalid params for the `unique` keyword.
        for e, arg in ((TypeError, 5.0), (ValueError, 'foobar'),
                       (ValueError, ('name', 'mpolygon'))):
            self.assertRaises(e,
                              LayerMapping,
                              County,
                              co_shp,
                              co_mapping,
                              transform=False,
                              unique=arg)

        # No source reference system defined in the shapefile, should raise an error.
        if not mysql:
            self.assertRaises(LayerMapError, LayerMapping, County, co_shp,
                              co_mapping)

        # Passing in invalid ForeignKey mapping parameters -- must be a dictionary
        # mapping for the model the ForeignKey points to.
        bad_fk_map1 = copy(co_mapping)
        bad_fk_map1['state'] = 'name'
        bad_fk_map2 = copy(co_mapping)
        bad_fk_map2['state'] = {
            'nombre': 'State'
        }
        self.assertRaises(TypeError,
                          LayerMapping,
                          County,
                          co_shp,
                          bad_fk_map1,
                          transform=False)
        self.assertRaises(LayerMapError,
                          LayerMapping,
                          County,
                          co_shp,
                          bad_fk_map2,
                          transform=False)

        # No State models exist yet for the ForeignKey mapping to work -- should
        # raise a MissingForeignKey exception (this error would be ignored if the
        # `strict` keyword were not set).
        lm = LayerMapping(County,
                          co_shp,
                          co_mapping,
                          transform=False,
                          unique='name')
        self.assertRaises(MissingForeignKey, lm.save, silent=True, strict=True)

        # Now creating the state models so the ForeignKey mapping may work.
        co, hi, tx = State(name='Colorado'), State(name='Hawaii'), State(
            name='Texas')
        co.save(), hi.save(), tx.save()

        # If a mapping is specified as a collection, all OGR fields that
        # are not collections will be converted into them.  For example,
        # a Point column would be converted to MultiPoint. Other things being done
        # w/the keyword args:
        #  `transform=False`: Specifies that no transform is to be done; this
        #    has the effect of ignoring the spatial reference check (because the
        #    county shapefile does not have implicit spatial reference info).
        #
        #  `unique='name'`: Creates models on the condition that they have
        #    unique county names; geometries from each feature however will be
        #    appended to the geometry collection of the unique model.  Thus,
        #    all of the various islands in Honolulu county will be in one
        #    database record with a MULTIPOLYGON type.
        lm = LayerMapping(County,
                          co_shp,
                          co_mapping,
                          transform=False,
                          unique='name')
        lm.save(silent=True, strict=True)

        # A reference that doesn't use the unique keyword; a new database record
        # will be created for each polygon.
        lm = LayerMapping(CountyFeat, co_shp, cofeat_mapping, transform=False)
        lm.save(silent=True, strict=True)

        # The county helper is called to ensure integrity of County models.
        self.county_helper()
Example #36
0
    def compute_joint_vel(self, des_vel):

        # joints = self.arm.joint_angles()
        # joints = joints.values()
        joints = copy(self.manipulator_joints)

        with self.robot:
            # self.robot.SetDOFValues(joints[::-1], self.robot.GetActiveManipulator().GetArmIndices())
            self.robot.SetDOFValues(
                joints,
                self.robot.GetActiveManipulator().GetArmIndices())
            J_t = self.robot.GetActiveManipulator().CalculateJacobian()
            J_r = self.robot.GetActiveManipulator(
            ).CalculateAngularVelocityJacobian()
            J = numpy.concatenate((J_t, J_r), axis=0)

        # add joint limit repulsive potential
        mid_joint_limit = (self.joint_limits_lower +
                           self.joint_limits_upper) / 2.0
        Q_star = (self.joint_limits_upper - self.joint_limits_lower) / 20.0

        q_dot = numpy.zeros((7, 1))

        max_joint_speed = 1.0
        K = 1.0
        weight_vector = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
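        # Joint-limit avoidance via a repulsive potential (FIRAS-style): a
        # joint within distance Q_star of a limit is pushed away with gradient
        # magnitude ~ (1/Q_star - 1/rho) / rho^2, where rho is the distance to
        # the limit; joints outside the Q_star band get zero repulsion.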
        for i in range(7):
            #q_dot[i] = - (K * weight_vector[i]) * (joints[i] - mid_joint_limit[i]) / ( (self.joint_limits_upper[i] - self.joint_limits_lower[i])**2)
            if (abs(joints[i] - self.joint_limits_upper[i]) <= Q_star[i]):
                q_dot[i] = -(
                    K * weight_vector[i] /
                    (self.joint_limits_upper[i] - self.joint_limits_lower[i])
                ) * (1.0 / Q_star[i] -
                     1.0 / abs(joints[i] - self.joint_limits_upper[i])) * (
                         1.0 / (joints[i] - self.joint_limits_upper[i])**
                         2) * abs(joints[i] - self.joint_limits_upper[i]) / (
                             joints[i] - self.joint_limits_upper[i])
            else:
                q_dot[i] = 0

            if (abs(joints[i] - self.joint_limits_lower[i]) <= Q_star[i]):
                q_dot[i] = q_dot[i] - (
                    K * weight_vector[i] /
                    (self.joint_limits_upper[i] - self.joint_limits_lower[i])
                ) * (1.0 / Q_star[i] -
                     1.0 / abs(joints[i] - self.joint_limits_lower[i])) * (
                         1.0 / (joints[i] - self.joint_limits_lower[i])**
                         2) * abs(joints[i] - self.joint_limits_lower[i]) / (
                             joints[i] - self.joint_limits_lower[i])

            if (abs(q_dot[i]) > max_joint_speed):
                q_dot[i] = max_joint_speed * self.normalize(q_dot[i])

        # print(numpy.linalg.pinv(J).shape)
        # print(des_vel.shape)
        # print(q_dot.shape)
        # print("joints: " + str(self.manipulator_joints))
        # print ("q: " + str(numpy.dot( numpy.linalg.pinv(J), des_vel.reshape(6,1))))
        # print ("q_dot: " + str(q_dot))
        # print ("qdot_proj: " + str(numpy.dot( (numpy.eye(7) - numpy.dot( numpy.linalg.pinv(J) , J )), q_dot)))
        return numpy.dot(numpy.linalg.pinv(J), des_vel.reshape(
            6, 1)) + numpy.dot(
                (numpy.eye(7) - numpy.dot(numpy.linalg.pinv(J), J)), q_dot)
Example #37
0
    def get_alias(self):
        return copy(self.alias)
Example #38
0
    def edit(self, spec, prefix):

        fftw = spec['fftw:openmp' if '+openmp' in spec else 'fftw']

        optimization_flags = {
            'gcc': [
                '-O2',
                '-funroll-loops',
                '-ftree-vectorize',
            ],
            'intel': ['-O2', '-pc64', '-unroll'],
            'pgi': ['-fast'],
        }

        dflags = ['-DNDEBUG']
        cppflags = [
            '-D__LIBINT',
            '-D__FFTW3',
            fftw.headers.cpp_flags,
        ]

        if '@:6.9' in spec:
            cppflags += [
                '-D__LIBINT_MAX_AM=6',
                '-D__LIBDERIV_MAX_AM1=5',
            ]

        if '^mpi@3:' in spec:
            cppflags.append('-D__MPI_VERSION=3')
        elif '^mpi@2:' in spec:
            cppflags.append('-D__MPI_VERSION=2')

        cflags = optimization_flags[self.spec.compiler.name][:]
        cxxflags = optimization_flags[self.spec.compiler.name][:]
        fcflags = optimization_flags[self.spec.compiler.name][:]
        nvflags = ['-O3']
        ldflags = []
        libs = []
        gpuver = ''

        if '%intel' in spec:
            cflags.append('-fp-model precise')
            cxxflags.append('-fp-model precise')
            fcflags += [
                '-fp-model source',
                '-heap-arrays 64',
                '-g',
                '-traceback',
            ]
        elif '%gcc' in spec:
            fcflags.extend([
                '-ffree-form',
                '-ffree-line-length-none',
                '-ggdb',  # make sure we get proper Fortran backtraces
            ])
        elif '%pgi' in spec:
            fcflags.extend(['-Mfreeform', '-Mextend'])

        if '+openmp' in spec:
            cflags.append(self.compiler.openmp_flag)
            cxxflags.append(self.compiler.openmp_flag)
            fcflags.append(self.compiler.openmp_flag)
            ldflags.append(self.compiler.openmp_flag)
            nvflags.append('-Xcompiler="{0}"'.format(
                self.compiler.openmp_flag))

        ldflags.append(fftw.libs.search_flags)

        if '[email protected]' in spec:
            ldflags.insert(0, '-Wl,--allow-multiple-definition')

        if '@:6.9' in spec:
            # libint-1.x.y has to be linked statically to work around
            # inconsistencies in its Fortran interface definition
            # (short-int vs int) which otherwise causes segfaults at runtime
            # due to wrong offsets into the shared library symbols.
            libs.extend([
                os.path.join(spec['libint'].libs.directories[0], 'libderiv.a'),
                os.path.join(spec['libint'].libs.directories[0], 'libint.a'),
            ])
        else:
            fcflags += ['$(shell pkg-config --cflags libint2)']
            libs += ['$(shell pkg-config --libs libint2)']

        if '+plumed' in self.spec:
            dflags.extend(['-D__PLUMED2'])
            cppflags.extend(['-D__PLUMED2'])
            libs.extend([
                os.path.join(self.spec['plumed'].prefix.lib,
                             'libplumed.{0}'.format(dso_suffix))
            ])

        fc = self.compiler.fc if '~mpi' in spec else self.spec['mpi'].mpifc

        # Intel
        if '%intel' in self.spec:
            cppflags.extend([
                '-D__INTEL', '-D__HAS_ISO_C_BINDING', '-D__USE_CP2K_TRACE',
                '-D__MKL'
            ])
            fcflags.extend(
                ['-diag-disable 8290,8291,10010,10212,11060', '-free', '-fpp'])

        # FFTW, LAPACK, BLAS
        lapack = spec['lapack'].libs
        blas = spec['blas'].libs
        ldflags.append((lapack + blas).search_flags)
        libs.extend([str(x) for x in (fftw.libs, lapack, blas)])

        if self.spec.variants['blas'].value == 'mkl':
            cppflags += ['-D__MKL']
        elif self.spec.variants['blas'].value == 'accelerate':
            cppflags += ['-D__ACCELERATE']

        # MPI
        if '+mpi' in self.spec:
            cppflags.extend(['-D__parallel', '-D__SCALAPACK'])

            scalapack = spec['scalapack'].libs
            ldflags.append(scalapack.search_flags)

            libs.extend(scalapack)
            libs.extend(self.spec['mpi:cxx'].libs)
            libs.extend(self.compiler.stdcxx_libs)

            if 'wannier90' in spec:
                cppflags.append('-D__WANNIER90')
                wannier = os.path.join(spec['wannier90'].libs.directories[0],
                                       'libwannier.a')
                libs.append(wannier)

        if '+libxc' in spec:
            cppflags += ['-D__LIBXC']

            if '@:6.9' in spec:
                libxc = spec['libxc:fortran,static']
                cppflags += [libxc.headers.cpp_flags]
                ldflags.append(libxc.libs.search_flags)
                libs.append(str(libxc.libs))
            else:
                fcflags += ['$(shell pkg-config --cflags libxcf03)']
                libs += ['$(shell pkg-config --libs libxcf03)']

        if '+pexsi' in self.spec:
            cppflags.append('-D__LIBPEXSI')
            fcflags.append('-I' +
                           os.path.join(spec['pexsi'].prefix, 'fortran'))
            libs.extend([
                os.path.join(spec['pexsi'].libs.directories[0], 'libpexsi.a'),
                os.path.join(spec['superlu-dist'].libs.directories[0],
                             'libsuperlu_dist.a'),
                os.path.join(spec['parmetis'].libs.directories[0],
                             'libparmetis.{0}'.format(dso_suffix)),
                os.path.join(spec['metis'].libs.directories[0],
                             'libmetis.{0}'.format(dso_suffix)),
            ])

        if '+elpa' in self.spec:
            elpa = spec['elpa']
            elpa_suffix = '_openmp' if '+openmp' in elpa else ''
            elpa_incdir = elpa.headers.directories[0]

            fcflags += ['-I{0}'.format(os.path.join(elpa_incdir, 'modules'))]
            libs.append(
                os.path.join(
                    elpa.libs.directories[0],
                    ('libelpa{elpa_suffix}.{dso_suffix}'.format(
                        elpa_suffix=elpa_suffix, dso_suffix=dso_suffix))))

            if spec.satisfies('@:4.999'):
                if elpa.satisfies('@:2014.5.999'):
                    cppflags.append('-D__ELPA')
                elif elpa.satisfies('@2014.6:2015.10.999'):
                    cppflags.append('-D__ELPA2')
                else:
                    cppflags.append('-D__ELPA3')
            else:
                cppflags.append('-D__ELPA={0}{1:02d}'.format(
                    elpa.version[0], int(elpa.version[1])))
                fcflags += ['-I{0}'.format(os.path.join(elpa_incdir, 'elpa'))]

        if self.spec.satisfies('+sirius'):
            sirius = spec['sirius']
            cppflags.append('-D__SIRIUS')
            fcflags += ['-I{0}'.format(os.path.join(sirius.prefix, 'fortran'))]
            libs += list(sirius.libs)

        if self.spec.satisfies('+cuda'):
            cppflags += ['-D__ACC']
            libs += ['-lcudart', '-lnvrtc', '-lcuda']

            if self.spec.satisfies('+cuda_blas'):
                cppflags += ['-D__DBCSR_ACC=2']
                libs += ['-lcublas']
            else:
                cppflags += ['-D__DBCSR_ACC']

            if self.spec.satisfies('+cuda_fft'):
                cppflags += ['-D__PW_CUDA']
                libs += ['-lcufft', '-lcublas']

            cuda_arch = self.spec.variants['cuda_arch'].value
            if cuda_arch:
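                # Map the CUDA compute capability to the GPU model name that
                # CP2K expects in GPUVER.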
                gpuver = {
                    '35': 'K40',
                    '37': 'K80',
                    '60': 'P100',
                    '70': 'V100',
                }[cuda_arch]

                if (cuda_arch == '35'
                        and self.spec.satisfies('+cuda_arch_35_k20x')):
                    gpuver = 'K20X'

        if 'smm=libsmm' in spec:
            lib_dir = os.path.join('lib', self.makefile_architecture,
                                   self.makefile_version)
            mkdirp(lib_dir)
            try:
                copy(env['LIBSMM_PATH'], os.path.join(lib_dir, 'libsmm.a'))
            except KeyError:
                raise KeyError('Point environment variable LIBSMM_PATH to '
                               'the absolute path of the libsmm.a file')
            except IOError:
                raise IOError('The file LIBSMM_PATH pointed to does not '
                              'exist. Note that it must be absolute path.')
            cppflags.extend([
                '-D__HAS_smm_dnn',
                '-D__HAS_smm_vec',
            ])
            libs.append('-lsmm')

        elif 'smm=libxsmm' in spec:
            cppflags.extend([
                '-D__LIBXSMM',
                '$(shell pkg-config --cflags-only-other libxsmmf)',
            ])
            fcflags.append('$(shell pkg-config --cflags-only-I libxsmmf)')
            libs.append('$(shell pkg-config --libs libxsmmf)')

        dflags.extend(cppflags)
        cflags.extend(cppflags)
        cxxflags.extend(cppflags)
        fcflags.extend(cppflags)
        nvflags.extend(cppflags)

        with open(self.makefile, 'w') as mkf:
            if '+plumed' in self.spec:
                # Include Plumed.inc in the Makefile
                mkf.write('include {0}\n'.format(
                    self.spec['plumed'].package.plumed_inc))

            mkf.write('CC = {0.compiler.cc}\n'.format(self))
            if '%intel' in self.spec:
                # CPP is a commented-out command in the Intel arch file of
                # CP2K. This is the hack through which the CP2K developers
                # avoid doing:
                #
                # ${CPP} <file>.F > <file>.f90
                #
                # and use `-fpp` instead
                mkf.write('CPP = # {0.compiler.cc} -P\n\n'.format(self))
                mkf.write('AR = xiar -r\n\n')
            else:
                mkf.write('CPP = # {0.compiler.cc} -E\n\n'.format(self))
                mkf.write('AR = ar -r\n\n')
            mkf.write('FC = {0}\n'.format(fc))
            mkf.write('LD = {0}\n'.format(fc))

            if self.spec.satisfies('+cuda'):
                mkf.write('NVCC = {0}\n'.format(
                    os.path.join(self.spec['cuda'].prefix, 'bin', 'nvcc')))

            # Write compiler flags to file
            mkf.write('DFLAGS = {0}\n\n'.format(' '.join(dflags)))
            mkf.write('CPPFLAGS = {0}\n\n'.format(' '.join(cppflags)))
            mkf.write('CFLAGS = {0}\n\n'.format(' '.join(cflags)))
            mkf.write('CXXFLAGS = {0}\n\n'.format(' '.join(cxxflags)))
            mkf.write('NVFLAGS = {0}\n\n'.format(' '.join(nvflags)))
            mkf.write('FCFLAGS = {0}\n\n'.format(' '.join(fcflags)))
            mkf.write('LDFLAGS = {0}\n\n'.format(' '.join(ldflags)))
            if '%intel' in spec:
                mkf.write('LDFLAGS_C = {0}\n\n'.format(' '.join(ldflags) +
                                                       ' -nofor_main'))
            mkf.write('LIBS = {0}\n\n'.format(' '.join(libs)))
            mkf.write('GPUVER = {0}\n\n'.format(gpuver))
            mkf.write('DATA_DIR = {0}\n\n'.format(self.prefix.share.data))
Example #39
0
    def __hist__(self,
                 region=None,
                 histname=None,
                 icut=None,
                 sys=None,
                 mode=None):
        """
        implementation of nominal hist getter
        """
        # compute k-factors for OS and SS regions
        kf_OS = {}
        kf_SS = {}

        # initialise k-factors
        for s in self.mc_samples:
            kf_OS[s] = 1.0
            kf_SS[s] = 1.0

        kf_regions = self.kf_regions

        # compute k-factors
        for s in self.kf_regions.keys():
            tmp_samples = list(self.mc_samples)
            tmp_samples.remove(s)
            data_sub = copy(self.data_minus_mc)
            data_sub.mc_samples = tmp_samples
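            # data_sub is the data with every MC sample except s subtracted;
            # the k-factor for s is the ratio of the data_sub integral to the
            # integral of s in the same control region.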

            kf_OS[s] = histutils.full_integral(
                data_sub.hist(region=kf_regions[s]["OS"],
                              histname=histname,
                              icut=kf_regions[s]["ncuts"],
                              sys=sys,
                              mode=mode))
            kf_OS[s] /= histutils.full_integral(
                s.hist(region=kf_regions[s]["OS"],
                       histname=histname,
                       icut=kf_regions[s]["ncuts"],
                       sys=sys,
                       mode=mode))
            kf_SS[s] = histutils.full_integral(
                data_sub.hist(region=kf_regions[s]["SS"],
                              histname=histname,
                              icut=kf_regions[s]["ncuts"],
                              sys=sys,
                              mode=mode))
            kf_SS[s] /= histutils.full_integral(
                s.hist(region=kf_regions[s]["SS"],
                       histname=histname,
                       icut=kf_regions[s]["ncuts"],
                       sys=sys,
                       mode=mode))

        # compute rqcd transfer factor
        # adding k_factors to the estimators
        self.data_minus_mc_num.mc_samples_rescales = kf_OS
        self.data_minus_mc_den.mc_samples_rescales = kf_SS

        rqcd_regions = self.rqcd_regions

        rqcd = histutils.full_integral(
            self.data_minus_mc_num.hist(
                region=rqcd_regions[self.data_sample]["num"],
                histname=histname,
                icut=rqcd_regions[self.data_sample]["ncuts"],
                sys=sys,
                mode=mode))
        rqcd /= histutils.full_integral(
            self.data_minus_mc_den.hist(
                region=rqcd_regions[self.data_sample]["den"],
                histname=histname,
                icut=rqcd_regions[self.data_sample]["ncuts"],
                sys=sys,
                mode=mode))

        if self.print_info:
            print
            print
            print "++++++++++++++++++++++++++++++++++++++++"
            print "Iteration for %s" % self.sample.name
            print "++++++++++++++++++++++++++++++++++++++++"
            print
            print "k-factors for %s, sys %s, sys_mode %s" % (histname, sys,
                                                             mode)
            print "----------------------------------------"
            print "Sample | Region | k-factor | Rqcd"
            print "----------------------------------------"
            for s in self.kf_regions.keys():
                print "%s | %s | %.3lf | %.3lf" % (s.name, kf_regions[s]["OS"],
                                                   kf_OS[s], rqcd)
                print "%s | %s | %.3lf | %.3lf" % (s.name, kf_regions[s]["SS"],
                                                   kf_SS[s], rqcd)
                print

        addon_regions = self.addon_regions

        h_fakes = self.data_sample.hist(
            region=addon_regions[self.data_sample]["SS"],
            histname=histname,
            icut=addon_regions[self.data_sample]["ncuts"])
        h_fakes.Scale(rqcd)

        h_addon = {}
        h_addon[self.data_sample] = h_fakes.Clone()

        for s in addon_regions.keys():
            if s == self.data_sample: continue
            h_addon[s] = s.hist(region=addon_regions[s]["OS"],
                                histname=histname,
                                icut=addon_regions[s]["ncuts"],
                                sys=sys,
                                mode=mode).Clone()
            h_addon[s].Scale(kf_OS[s])
            h_addon[s].Add(
                s.hist(region=addon_regions[s]["SS"],
                       histname=histname,
                       icut=addon_regions[s]["ncuts"],
                       sys=sys,
                       mode=mode).Clone(), -1.0 * rqcd * kf_SS[s])
        """
        ToDo: implement sys uncertainty for the scales!!!
        """
        if sys and "scale" in sys.name: pass

        if not self.sample.name == "fakes":
            for s in h_addon.keys():
                if self.sample.name == s.name:
                    return h_addon[s]
        else:
            return histutils.add_hists(h_addon.values())
Example #40
0
def updateseriessquare(s, slowmode=1):  #fgrid is made here!
    # print("top of function line 28 (1): ", s['column1']['pval'], s['column2']['pval'], s['column3']['pval'], s['column4']['pval'], '\n')
    # print('before line 29 (2):',s['linecount'], s['netpval'],  s['pvalprefactor'], s['seriespval'], s['quadpval'], s['columnp'], '\n')
    # unpackingstruct(s, s_before, "place1.txt")
    s['column1'] = updateseries(s['column1'])
    s['column2'] = updateseries(s['column2'])
    s['column3'] = updateseries(s['column3'])
    s['column4'] = updateseries(s['column4'])
    s['numjs'] = np.size(s['column1']['fs'], axis=None)
    # print("after updateseries line 36 (3): ", s['column1']['pval'], s['column2']['pval'], s['column3']['pval'], s['column4']['pval'], '\n')
    # print('after 37 (4): ',s['linecount'], s['netpval'],  s['pvalprefactor'], s['seriespval'], s['quadpval'], s['columnp'], '\n')
    # print(f"extra 3: {s['numcolumnconstraints']},{s['columndiffspread']},{s['frange']}, diff: {s['a1diff']}, {s['b1diff']}, {s['b2diff']}, {s['a2diff']} \n")
    s['fgrid'] = np.zeros((s['numjs'], 4))

    # if s['dtype == 1
    s['fgrid'][:, 0] = s['column1']['fs']

    s['fgrid'][:, 1] = s['column2']['fs']

    s['fgrid'][:, 2] = s['column3']['fs']

    s['fgrid'][:, 3] = s['column4']['fs']

    if 'originalfgrid' not in s:
        s['originalfgrid'] = s['fgrid']

    if 'allpredicts' not in s:
        s['lineorder'] = s['fgrid'] * 0
        # updateseriessquare.m:27
        if s['flattype'] == 'D':
            if s['fgrid'][2, 0] != 0:
                s['lineorder'][2] = np.array([1, 3, 0, 0])
                s['lineorder'][3] = np.array([2, 0, 4, 0])
            else:
                s['lineorder'][2] = np.array([0, 0, 3, 1])
                s['lineorder'][3] = np.array([0, 4, 0, 2])
        elif s['flattype'] == '/':
            s['lineorder'][2] = np.array([3, 1, 0, 0])
            s['lineorder'][3] = np.array([0, 2, 0, 4])
        elif s['flattype'] == '\\':
            s['lineorder'][2] = np.array([0, 0, 1, 3])
            s['lineorder'][3] = np.array([4, 0, 2, 0])
#         s['lineorder(6,:) = [13 16  14  15];
#         s['lineorder(7,:) = [17 20  18  19];
#         s['lineorder(8,:) = [21 24  22  23];

        s['allpredicts'] = s['fgrid'] * 0
        s['alineorder'] = deepcopy(
            s['lineorder'])  #alineorder is same as MATLAB here
        s['corners'] = np.zeros((1, 8))

    s['maxnumlines'] = np.amax(s['lineorder'])
    s['maxnumlines'] = int(s['maxnumlines'])
    s['columnorder'] = np.zeros((1, s['maxnumlines']))
    s['roworder'] = copy(s['columnorder'])

    n = 1
    nonzero_lineorder = np.where(
        s['lineorder'])  #index where lineorder is nonzero
    indice_nz_lineorder = np.argsort(
        s['lineorder'][nonzero_lineorder]
    )  #lineorder[a]-> only where lineorder is nonzero, and returns the indices for the sorting of it
    s['roworder'] = nonzero_lineorder[0][
        indice_nz_lineorder]  #now apply the sorting to the nonzero indices
    s['columnorder'] = nonzero_lineorder[1][indice_nz_lineorder]
    #    for i in np.arange(0,s['numjs']).reshape(-1):
    #        for j in np.arange(0,3).reshape(-1):
    #            #             if s['originalfgrid(i,j) ~= 0
    ##                 s['alineorder(i,j) = n;
    ##                 n = n+1;
    ##             end
    #            thiso=s['lineorder'][i,j]
    #            thiso=int(thiso)
    #            print(f"this is i,{i}, and j,{j}, and this is thiso, {thiso}\n")
    #            print(f"this is columnorder, {s['columnorder']} and this is roworder {s['roworder']}\n")
    #            if thiso > 0:
    #                thiso_c= deepcopy(thiso)
    #                s['columnorder'][0][thiso_c]= deepcopy(j)
    #                s['roworder'][0][thiso_c]= deepcopy(i)
    #                print(f"4loop. this is i,{i}, and j,{j}, and this is thiso, {thiso}, this is thiso_c, {thiso_c}\n")
    #                print(f"4loop.this is columnorder, {s['columnorder']} and this is roworder {s['roworder']}\n")
    # s.update({'listpredicts': []})
    if 'listpredicts' not in s:
        s.update({'listpredicts': [0.0, 0.0, 0.0, 0.0]})

    for i in np.arange(0, 4):
        r = s['roworder'][i]
        c = s['columnorder'][i]
        # s['listpredicts'].append(s['fgrid'][r][c] - s['flatsquare']['flaterrors'][i]) #should not be list, but rather a np array
        s['listpredicts'][
            i] = s['fgrid'][r][c] - s['flatsquare']['flaterrors'][i]
        s['allpredicts'][r, c] = s['listpredicts'][i]

    s['allerrors'] = s['fgrid'] - s['allpredicts']

    fdiffs = np.array([])
    if np.size(s['column1']['realfs']) >= 2:
        fdiffs = np.diff(s['column1']['realfs'])

    if np.size(s['column4']['realfs']) >= 2:
        fdiffs = np.concatenate((fdiffs, np.diff(s['column4']['realfs'])))

    if np.size(fdiffs) > 0:
        s['bpluscguess'] = np.mean(fdiffs)
        s['bpluscerror'] = s['lowsidetolerance'][1]
    else:
        if np.size(s['column2']['realfs']) >= 2:
            fdiffs = np.diff(s['column2']['realfs'])

        if np.size(s['column3']['realfs']) >= 2:
            fdiffs = np.concatenate((fdiffs, np.diff(s['column3']['realfs'])))

        if np.size(fdiffs) > 0:
            s['bpluscguess'] = np.mean(fdiffs)
            s['bpluscerror'] = s['lowsidetolerance'][1]
        else:
            s['bpluscguess'] = 3000
            s['bpluscerror'] = s['bpluscguess'] * 0.9

    s['highestfullrow'] = 0
    s['lowestfullrow'] = 0

    n = np.size(s['column1']['fs'])
    i = np.size(s['column1']['fs']) - 1
    if (s['column1']['fs'][i] != 0) and (s['column2']['fs'][i] != 0) and (
            s['column3']['fs'][i] != 0) and (s['column4']['fs'][i] != 0):
        s['highestfullrow'] = i
# updateseriessquare.m:119
    if (s['column1']['fs'][n - i] !=
            0) and (s['column2']['fs'][n - i] !=
                    0) and (s['column3']['fs'][n - i] !=
                            0) and (s['column4']['fs'][n - i] != 0):
        s['lowestfullrow'] = n - i
# updateseriessquare.m:122
    if (s['column1']['fs'][i] != 0) or (s['column2']['fs'][i] != 0) or (
            s['column3']['fs'][i] != 0) or (s['column4']['fs'][i] != 0):
        s['highestpartialrow'] = i
# updateseriessquare.m:125
    if (s['column1']['fs'][n - i] != 0) or (
            s['column2']['fs'][n - i] != 0
    ) or (s['column3']['fs'][n - i] != 0) or (s['column4']['fs'][n - i] != 0):
        s['lowestpartialrow'] = n - i
# updateseriessquare.m:128

#s['degree = length(s['column1']['fs']) - 1;
    quadsums = np.array([])
    # updateseriessquare.m:132
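    # Quad sums are closure sums around quadrilaterals of fgrid: for a
    # self-consistent grid each combination should come out near zero
    # (meanquadsum is checked against that tolerance below).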
    for i in np.arange(1, np.size(s['column1']['fs']) - 1):
        if (s['fgrid'][i, 0] != 0) and (s['fgrid'][i + 1, 0] != 0) and (
                s['fgrid'][i, 1] != 0) and (s['fgrid'][i + 1, 2] != 0):
            quadsums = np.append(
                quadsums, s['fgrid'][i, 0] + s['fgrid'][i + 1, 0] -
                s['fgrid'][i, 1] - s['fgrid'][i + 1, 2])
# updateseriessquare.m:135
        if (s['fgrid'][i, 3] != 0) and (s['fgrid'][i + 1, 3] != 0) and (
                s['fgrid'][i, 2] != 0) and (s['fgrid'][i + 1, 1] != 0):
            quadsums = np.append(
                quadsums, s['fgrid'][i, 3] + s['fgrid'][i + 1, 3] -
                s['fgrid'][i, 2] - s['fgrid'][i + 1, 1])
# updateseriessquare.m:138
        if (s['fgrid'][i, 0] != 0) and (s['fgrid'][i, 1] != 0) and (
                s['fgrid'][i + 1, 1] != 0) and (s['fgrid'][i + 1, 3] != 0):
            quadsums = np.append(
                quadsums, s['fgrid'][i, 0] - s['fgrid'][i + 1, 3] -
                s['fgrid'][i, 1] + s['fgrid'][i + 1, 1])
# updateseriessquare.m:141
        if (s['fgrid'][i + 1, 0] != 0) and (s['fgrid'][i, 2] != 0) and (
                s['fgrid'][i + 1, 2] != 0) and (s['fgrid'][i, 3] != 0):
            quadsums = np.append(
                quadsums, s['fgrid'][i + 1, 0] - s['fgrid'][i, 3] +
                s['fgrid'][i, 2] - s['fgrid'][i + 1, 2])
# updateseriessquare.m:144
        if (s['fgrid'][i, 0] != 0) and (s['fgrid'][i, 1] != 0) and (
                s['fgrid'][i, 2] != 0) and (s['fgrid'][i, 3] != 0):
            quadsums = np.append(
                quadsums, s['fgrid'][i, 0] + s['fgrid'][i, 3] -
                s['fgrid'][i, 2] - s['fgrid'][i, 1])
# updateseriessquare.m:147

    s['healthy'] = checkhealth(s)
    # updateseriessquare.m:150
    s['aheight'] = np.mean(
        np.concatenate((s['column1']['realhs'], s['column4']['realhs'])))
    # updateseriessquare.m:151
    s['bheight'] = np.mean(
        np.concatenate((s['column2']['realhs'], s['column3']['realhs'])))
    # updateseriessquare.m:152
    #     if length(s['column1']realhs) > 0
    #         s['aheight = mean(s['column1']realhs);
    #     else
    #         s['aheight = s['column4['realhs'](end);
    #     end
    #     if (length(s['column1']realhs) > 0) && (length(s['column4['realhs']) > 0)
    #         aheight = (s['column1']realhs(end) + s['column1']realhs(end))/2;
    #     end
    #     if length(s['column2['realhs']) > 0
    #         bheight = s['column2['realhs'](end);
    #     else
    #         bheight = s['column3['realhs'](end);
    #     end
    #     if (length(s['column2['realhs']) > 0) && (length(s['column3['realhs']) > 0)
    #         bheight = (s['column2['realhs'](end) + s['column3['realhs'](end))/2;
    #     end
    s['longquadstring'] = ('Quad sums:')
    # updateseriessquare.m:169
    for i in np.arange(0, np.size(quadsums)).reshape(-1):
        s['longquadstring'] = f"{s['longquadstring']} \n {quadsums[i]: .4f}"
# updateseriessquare.m:171

    s['medianquadsum'] = np.median(abs(quadsums))
    # updateseriessquare.m:173
    s['maxquadsum'] = max(abs(quadsums))
    # updateseriessquare.m:174
    s['quadstring'] = f"{np.size(quadsums)} quads, median {np.dot(s['medianquadsum'],1000): .1f},max {np.dot(s['maxquadsum'],1000): 0.1f} Khz"
    # updateseriessquare.m:175
    meanquadsum = math.sqrt(np.mean(quadsums**2))
    # updateseriessquare.m:176
    s['meanquadsum'] = max(meanquadsum, 0.001)
    # updateseriessquare.m:177
    if s['meanquadsum'] > 0.1:
        #     if containsf(s,3000) == 0
        #         fprintf('doesnt have 3000');
        #     end
        # quadsums
        # s['fgrid']
        #  error('quads dont work!');
        pass

    s['quadsums'] = quadsums
    # updateseriessquare.m:186
    s['onequadpval'] = s['meanquadsum'] / s['frange']
    # updateseriessquare.m:187
    sums = np.array([])
    # updateseriessquare.m:188
    csums = np.array([])
    # updateseriessquare.m:189
    s['allfs'] = np.concatenate(
        (s['column1']['realfs'], s['column2']['realfs'],
         s['column3']['realfs'], s['column4']['realfs']))
    # updateseriessquare.m:192
    s['allhs'] = np.concatenate(
        (s['column1']['realhs'], s['column2']['realhs'],
         s['column3']['realhs'], s['column4']['realhs']))
    # updateseriessquare.m:193
    s['listerrors'] = np.zeros((1, np.size(s['allfs'])))
    # updateseriessquare.m:195
    s['f1'] = s['fgrid'][s['roworder'][0]][s['columnorder'][0]]
    # updateseriessquare.m:196
    for i in np.arange(0, 4).reshape(-1):
        for j in np.arange(0, np.size(s['column1']['fs'])).reshape(-1):
            thiso = s['alineorder'][j, i]
            # updateseriessquare.m:199
            thisf = s['fgrid'][j, i]
            # updateseriessquare.m:200
            if thisf != 0:
                if thiso == 0:
                    pass  # no-op (leftover MATLAB debugging stop)
                thiso = int(thiso)
                s['listerrors'][0][thiso - 1] = s['allerrors'][j, i]
# updateseriessquare.m:205
#    a=np.where(s['alineorder']) #index where lineorder is nonzero
#    kk=np.argsort(s['alineorder'][a]) #lineorder[a]-> only where lineorder is nonzero, and returns the indices for the sorting of it
#    s['roworder']=a[0][kk] #now apply the sorting to the nonzero indices
#    s['columnorder']=a[1][kk]

    s['allnormerrors'] = s['allerrors'] / s['f1']
    # updateseriessquare.m:209
    s['listnormerrors'] = s['listerrors'] / s['f1']
    # updateseriessquare.m:210
    s['errorstring'] = ('prediction\\n errors\\n')
    # updateseriessquare.m:211
    s['searchspace'] = 1
    # updateseriessquare.m:212
    for i in np.arange(0, np.size(s['listerrors'])).reshape(-1):
        if (i <= 11) and ((i == 1)
                          or np.mod(i, 2) == 0):  ## this needs to be validated
            s['searchspace'] = s['searchspace'] * s['listerrors'][0][
                i]  ##stopped here
# updateseriessquare.m:216
        s['errorstring'] = f"{s['errorstring']} {s['listerrors'][0][i]: 0.2f}\\n"
# updateseriessquare.m:218

    s['errorstring'] = f"{s['errorstring']} NET \n {s['searchspace']} \n"
    # updateseriessquare.m:220
    s['nextline'] = np.size(s['allfs']) + 1
    # updateseriessquare.m:221
    s['aamaxerror'] = 0
    # updateseriessquare.m:224
    s['abmaxerror'] = 0
    # updateseriessquare.m:225
    aaerrors = np.array([])
    # updateseriessquare.m:226
    aberrors = np.array([])
    # updateseriessquare.m:227
    for i in np.arange(0, np.size(s['column1']['fs'])).reshape(-1):
        if (s['column1']['fs'][i] != 0) and (s['column4']['fs'][i] != 0):
            aaerrors = np.append(aaerrors,
                                 s['column1']['fs'][i] - s['column4']['fs'][i])
# updateseriessquare.m:230
        if (s['column1']['fs'][i] != 0) and (s['column2']['fs'][i] != 0):
            aberrors = np.append(aberrors,
                                 s['column1']['fs'][i] - s['column2']['fs'][i])
# updateseriessquare.m:233
        if (s['column1']['fs'][i] != 0) and (s['column3']['fs'][i] != 0):
            aberrors = np.append(aberrors,
                                 s['column1']['fs'][i] - s['column3']['fs'][i])
# updateseriessquare.m:236

    if np.size(aaerrors) > 0:
        s['aamaxerror'] = max(abs(aaerrors))
# updateseriessquare.m:240

    if np.size(aberrors) > 0:
        s['abmaxerror'] = max(abs(aberrors))
# updateseriessquare.m:243

    s['numlines'] = np.size(s['allfs'])
    # updateseriessquare.m:245
    s['numconstraints'] = (copy(s['numlines']) / 2) - 1
    # updateseriessquare.m:246

    s['degree'] = copy(s['numconstraints'])
    # updateseriessquare.m:247
    #if s['dtype == 1
    s['allafs'] = np.concatenate(
        (s['column1']['realfs'], s['column4']['realfs']))
    # updateseriessquare.m:249
    s['allahs'] = np.concatenate(
        (s['column1']['realhs'], s['column4']['realhs']))
    # updateseriessquare.m:250
    s['allbfs'] = np.concatenate(
        (s['column2']['realfs'], s['column3']['realfs']))
    # updateseriessquare.m:251
    s['allbhs'] = np.concatenate(
        (s['column2']['realhs'], s['column3']['realhs']))
    # updateseriessquare.m:252
    # else
    #     s['allafs = [s['series2.realfs s['series3.realfs];
    #     s['allahs = [s['series2['realhs'] s['series3['realhs']];
    #     s['allbfs = [s['series1.realfs s['series4.realfs];
    #     s['allbhs = [s['series1['realhs'] s['series4['realhs']];
    # end
    s['heightstring'] = f"A heights {np.array2string(s['allahs'],3)}, B heights {np.array2string(s['allbhs'],3)}"
    # updateseriessquare.m:259
    #s['series4string = print('Series 4 predictions: #s',num2np.array2string(s['series4.fs,6));

    s['minf'] = min(s['allfs'])
    # updateseriessquare.m:262
    s['minh'] = min(s['allhs'])
    # updateseriessquare.m:263
    s['allbestfs'] = copy(s['allfs'])
    # updateseriessquare.m:264
    s['allbesths'] = copy(s['allhs'])
    # updateseriessquare.m:265

    s['bestfstring'] = np.array2string(s['allfs'], 6)
    # updateseriessquare.m:266
    s['series4string'] = 'no series 4 yet'
    # updateseriessquare.m:267
    s['allfstring'] = np.array2string(s['allfs'], 6)
    # updateseriessquare.m:268
    s['bendstring'] = '2 points, no bend'
    # updateseriessquare.m:269
    s['sortfs'] = np.sort(s['allfs'])
    # updateseriessquare.m:270
    s['usablefgrid'] = usablefgrid(s)
    # updateseriessquare.m:271
    s['gridhash'] = hashfromsquare(s)
    # updateseriessquare.m:272
    s['mindiff'] = min(np.diff(s['sortfs']))
    # updateseriessquare.m:273
    s['isoutlawed'] = s['column1']['outlawed'] or s['column2'][
        'outlawed'] or s['column3']['outlawed'] or s['column4'][
            'outlawed'] or (s['mindiff'] < 0.01)
    # updateseriessquare.m:274
    s['seriesstring'] = (
        "%s%s%s%s" % (s['column1']['outlawchar'], s['column2']['outlawchar'],
                      s['column3']['outlawchar'], s['column4']['outlawchar']))
    # updateseriessquare.m:275
    s['a1diff'] = 0
    # updateseriessquare.m:279
    s['a2diff'] = 0
    # updateseriessquare.m:280
    s['a1bend'] = 0
    # updateseriessquare.m:281
    s['a2bend'] = 0
    # updateseriessquare.m:282
    s['b1diff'] = 0
    # updateseriessquare.m:283
    s['b2diff'] = 0
    # updateseriessquare.m:284
    s['b1bend'] = 0
    # updateseriessquare.m:285
    s['b2bend'] = 0
    # updateseriessquare.m:286
    if np.size(s['column1']['realfs']) >= 2:
        s['a1diff'] = np.mean(np.diff(s['column1']['realfs']))
# updateseriessquare.m:288

    if np.size(s['column1']['realfs']) >= 3:
        s['a1bend'] = np.mean(np.diff(np.diff(s['column1']['realfs'])))
# updateseriessquare.m:292

    s['a1bendstring'] = f"a1diff' {s['a1diff']: 0.3f}, a1bend {s['a1bend']: 0.3f}"
    # updateseriessquare.m:294
    if np.size(s['column2']['realfs']) >= 2:
        s['b1diff'] = np.mean(np.diff(s['column2']['realfs']))
# updateseriessquare.m:297

    if np.size(s['column2']['realfs']) >= 3:
        s['b1bend'] = np.mean(np.diff(np.diff(s['column2']['realfs'])))
# updateseriessquare.m:301

    s['b1bendstring'] = f"b1diff {s['b1diff']: 0.3f}, b1bend {s['b1bend']: 0.3f}"
    # updateseriessquare.m:304
    if np.size(s['column4']['realfs']) >= 2:
        s['a2diff'] = np.mean(np.diff(s['column4']['realfs']))
# updateseriessquare.m:307

    if np.size(s['column4']['realfs']) >= 3:
        s['a2bend'] = np.mean(np.diff(np.diff(s['column4']['realfs'])))
# updateseriessquare.m:311

    s['a2bendstring'] = f"a2diff {s['a2diff']: 0.3f}, a2bend {s['a2bend']: 0.3f}"
    # updateseriessquare.m:313
    if np.size(s['column3']['realfs']) >= 2:
        s['b2diff'] = np.mean(np.diff(s['column3']['realfs']))
# updateseriessquare.m:316

    if np.size(s['column3']['realfs']) >= 3:
        s['b2bend'] = np.mean(np.diff(np.diff(s['column3']['realfs'])))
# updateseriessquare.m:320

    s['b2bendstring'] = f"b2diff {s['b2diff']: 0.3f}, b2bend {s['b2bend']: 0.3f}"
    # updateseriessquare.m:322
    s['tightdescriptor'] = (
        '%s\\n%s\\n%s\\n%s\\n%s\\naa tolerance %f, abtolerance %f\\n' %
        (s['a1bendstring'], s['a2bendstring'], s['b1bendstring'],
         s['b2bendstring'], s['quadstring'], s['aamaxerror'], s['abmaxerror']))
    # updateseriessquare.m:325
    s['columnp'] = 1
    # updateseriessquare.m:326
    s['columndiffs'] = np.array(
        [s['a1diff'], s['b1diff'], s['b2diff'], s['a2diff']])
    # updateseriessquare.m:327
    s['realcolumndiffs'] = s['columndiffs'][s['columndiffs'] < float('inf')]
    # updateseriessquare.m:328
    s['columndiffspread'] = max(s['realcolumndiffs'] -
                                min(s['realcolumndiffs']))
    # updateseriessquare.m:329
    s['numcolumnconstraints'] = np.size(s['realcolumndiffs']) - 1
    # updateseriessquare.m:330
    #reward for columns agreeing with each other.
    if s['numcolumnconstraints'] >= 1:
        s['columnp'] = (s['columndiffspread'] /
                        s['frange'])**s['numcolumnconstraints']
# updateseriessquare.m:333

    s['termstring'] = 'ud'
    # updateseriessquare.m:336
    if s['upterminated'] == 1:
        s['termstring'] = 'Ud'
# updateseriessquare.m:338

    if s['downterminated'] == 1:
        s['termstring'] = 'uD'
# updateseriessquare.m:341

#now do pvalues
# print('before 441 (5): ',s['linecount'], s['netpval'],  s['pvalprefactor'], s['seriespval'], s['quadpval'], s['columnp'], '\n')
# print(f"extra 3: {s['numcolumnconstraints']},{s['columndiffspread']},{s['frange']}, diff: {s['a1diff']}, {s['b1diff']}, {s['b2diff']}, {s['a2diff']} \n")
    s = addlevels(s)
    # print('after 442 (6): ',s['linecount'], s['netpval'],  s['pvalprefactor'], s['seriespval'], s['quadpval'], s['columnp'], '\n')
    # updateseriessquare.m:344
    #choose which row to do next
    allcoords = []
    allcoords.append(predictnext(s, 'ur'))
    # updateseriessquare.m:346
    allcoords.append(predictnext(s, 'ul'))
    # updateseriessquare.m:347
    allcoords.append(predictnext(s, 'dr'))
    # updateseriessquare.m:348
    allcoords.append(predictnext(s, 'dl'))
    # updateseriessquare.m:349
    if s['forcecorners'] == 0:
        dists = np.array((allcoords[0]['fdist'], allcoords[1]['fdist'],
                          allcoords[2]['fdist'], allcoords[3]['fdist']))
        # updateseriessquare.m:351
        #recn = find(recommended == 1);
        #dists = dists(recn);
        bestcorner = np.nonzero(dists == min(dists))[0][0]
        # updateseriessquare.m:357
        reccoords = allcoords[bestcorner]
        # updateseriessquare.m:358
        if min(dists) > 1000000000.0:
            s['closedout'] = 1
# updateseriessquare.m:360
    else:
        if (s['numlines'] + 1) > np.size(s['cornermap']):
            s['closedout'] = 1
# updateseriessquare.m:364
        else:
            reccoords = allcoords[s['cornermap'][s['numlines'] + 1]]
            # updateseriessquare.m:367
            bestcorner = s['cornermap'][s['numlines'] + 1]
# updateseriessquare.m:368

    if s['closedout'] == 0:
        try:
            s['corners'][int(s['numlines'])] = bestcorner
        except IndexError:  # preallocated corners array is full; grow it
            s['corners'] = np.append(s['corners'], bestcorner)
# updateseriessquare.m:374
        s['nextcolumn'] = reccoords['c1']
        # updateseriessquare.m:376
        s['nextrow'] = reccoords['r1']
        # updateseriessquare.m:377
        s['nextnextcolumn'] = reccoords['c2']
        # updateseriessquare.m:379
        s['nextnextrow'] = reccoords['r2']
        # updateseriessquare.m:380
        s['alineorder'][s['nextrow'], s['nextcolumn'] - 1] = s['numlines'] + 1
        # updateseriessquare.m:381
        s['alineorder'][s['nextnextrow'],
                        s['nextnextcolumn'] - 1] = s['numlines'] + 2
        # updateseriessquare.m:382
        s['nextpredictf'] = reccoords['f1']
        # updateseriessquare.m:384
        s['nextnextpredictf'] = reccoords['f2']
        # updateseriessquare.m:385
        s['nextenergydiff'] = reccoords['energydiff']
        # updateseriessquare.m:386
        s['nextminf'] = reccoords['minf']
        # updateseriessquare.m:387
        s['nextmaxf'] = reccoords['maxf']
        # updateseriessquare.m:388
        s['allpredicts'][s['nextrow'], s['nextcolumn'] - 1] = s['nextpredictf']
# updateseriessquare.m:389
    else:
        s['nextcolumn'] = 0
        # updateseriessquare.m:391
        s['nextrow'] = 0
        # updateseriessquare.m:392
        s['nextnextcolumn'] = 0
        # updateseriessquare.m:393
        s['nextnextrow'] = 0
# updateseriessquare.m:394

    if (s['prolate'] == 1) and (s['oblate'] == 1):
        s['isoutlawed'] = 1
# updateseriessquare.m:398

    if s['prolate'] == 0:
        sold = s
        # updateseriessquare.m:403
        s = flipseriessquare(s)
        # updateseriessquare.m:404

    # print('before 529 (7): ',s['linecount'], s['netpval'],  s['pvalprefactor'], s['seriespval'], s['quadpval'], s['columnp'], '\n')
    s['pvalprefactor'] = 1
    # updateseriessquare.m:408
    for h in s['allhs']:
        linecount = countfrommcounttool(s['counttool'], h)
        # updateseriessquare.m:410
        s['pvalprefactor'] = s['pvalprefactor'] * (linecount * 1.5)
# updateseriessquare.m:411
# print('before 537 (8): ',s['linecount'], s['netpval'],  s['pvalprefactor'], s['seriespval'], s['quadpval'], s['columnp'], '\n')
    s['linecount'] = countfrommcounttool(s['counttool'], min(s['allhs']))
    # updateseriessquare.m:413
    s['oldpvalprefactor'] = s['linecount']**np.size(s['allfs'])
    # updateseriessquare.m:415
    s['seriespval'] = s['column1']['pval'] * s['column2']['pval'] * s[
        'column3']['pval'] * s['column4']['pval']
    # updateseriessquare.m:417
    s['quadpval'] = s['onequadpval']**s['numconstraints']
    # updateseriessquare.m:418
    s['netpval'] = s['pvalprefactor'] * s['seriespval'] * s['quadpval'] * s[
        'columnp']
    # print('after 547 (9): ',s['linecount'], s['netpval'],  s['pvalprefactor'], s['seriespval'], s['quadpval'], s['columnp'], '\n')
    # updateseriessquare.m:420
    if np.isnan(s['netpval']):
        pass  # no-op (leftover MATLAB debugging stop)

    if 'originalpval' not in s:
        s['originalpval'] = s['netpval']
# updateseriessquare.m:425

    f2f1 = s['column1']['fs'][1] - s['column1']['fs'][0]
    # updateseriessquare.m:428
    if s['column1']['fs'][2] > 0:
        s['predictoffset'] = s['column1']['fs'][2] / s['bpluscguess']
# updateseriessquare.m:430
    else:
        s['predictoffset'] = (s['column1']['fs'][3] / s['bpluscguess']) - 1
# updateseriessquare.m:432

    s['pvalstring'] = (
        'net pval %.1e [%.1e original], %d constraints, [%.1e %.1e]' %
        (s['netpval'], s['originalpval'], s['numconstraints'], s['seriespval'],
         s['quadpval']))
    # updateseriessquare.m:434
    s['shortpvalstring'] = ('net pval %.1e, %d constraints' %
                            (s['netpval'], s['numconstraints']))
    # updateseriessquare.m:435
    s['verbosebend'] = (
        '%s\\n %s\\n %s' %
        (s['column1']['predictstring'], s['column2']['predictstring'],
         s['column3']['predictstring']))
    # updateseriessquare.m:436
    s['descriptor'] = ('%s square of degree %d, %d lines' %
                       (s['termstring'], s['degree'], s['numlines']))
    # updateseriessquare.m:437
    if (s['degree'] >= s['tightnesssettings']['mindegree']) and (
            s['netpval'] < s['tightnesssettings']['checkablepval']):
        s['testable'] = 1
# updateseriessquare.m:439
    else:
        s['testable'] = 0


# updateseriessquare.m:441
    return s
Example #41
0
def addlevels(s):

    # Strings together transitions to find levels. Not particularly fast. By
    # the end, levels['maxe'] and levels['mine'] should reflect the spread of
    # calculated values. Overconstraints are simply ignored - work it out later.
    # levels['isknown'] = zeros(s['numjs'] + 1, 2)  # how many constraints

    # Also labels items in fgrid which are searchable or overconstrained.
    # s['fstatus'] can be:
    # s['fstatus'][i, j] == 0  # untouched
    # s['fstatus'][i, j] == 1  # known
    # s['fstatus'][i, j] == 2  # worth searching - connects to two. set flimits? maybe not
    # s['fstatus'][i, j] == 3  # MUST search - overconstrained. s['flimits'] set tight
    levels = {}
    levels['energy'] = np.zeros((s['numjs'] + 1, 2))
    # updateseriessquare.m:504
    #levels['energy(2,1) = 1;
    if s['fgrid'][2, 0] != 0:
        levels['energy'][2, 0] = -1
# updateseriessquare.m:507
    else:
        if s['fgrid'][2, 3] != 0:
            levels['energy'][2, 1] = -1
# updateseriessquare.m:510
        else:
            raise Exception('grid is a blank-o')

    growing = 1
    # updateseriessquare.m:515
    while growing == 1:

        growing = 0
        # updateseriessquare.m:517
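        # Sweep the whole grid; every known transition frequency either fixes
        # a new level energy or confirms an existing one. Repeat until a full
        # pass adds nothing new (a simple fixed-point iteration).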
        for j in np.arange(0, s['numjs']).reshape(-1):
            for i in np.arange(0, 4).reshape(-1):
                if s['fgrid'][j, i] != 0:
                    levels, added = addlinetolevels(
                        levels, j, i,
                        s['fgrid'][j, i])  #levels is the one that is wrong
                    # updateseriessquare.m:521
                    if added:
                        growing = 1
# updateseriessquare.m:523

    minenergy = min(np.ndarray.flatten(levels['energy']))
    # updateseriessquare.m:529
    newenergies = 1e-08 + copy(levels['energy']) - minenergy
    # updateseriessquare.m:531

    for i in np.arange(0, s['numjs'] + 1):
        for j in np.arange(0, 2):
            if levels['energy'][i, j] == 0:
                newenergies[i, j] = 0
# updateseriessquare.m:535

    s['prolate'] = 0
    # updateseriessquare.m:539
    s['oblate'] = 0
    # updateseriessquare.m:540
    for i in np.arange(0, s['numjs'] + 1).reshape(-1):
        if (newenergies[i, 0] != 0) and (newenergies[i, 1] != 0):
            if (newenergies[i, 0] < newenergies[i, 1]):
                s['prolate'] = 1
# updateseriessquare.m:544
            else:
                s['oblate'] = 1


# updateseriessquare.m:546

#     end
#     if (newenergies(i,1) ~= 0) && (newenergies(i,2) ~= 0) && (newenergies(i,1) < newenergies(i,2))
#         s['oblate = 1;
#     end
# end
    s['energies'] = newenergies
    # updateseriessquare.m:555
    # Strings together transitions to find the levels, which are nominally
    # needed just for plotting, but also are a good thing to keep in mind.
    return s
Example #42
0
	def _ComputeGeoProximity(self, ptsFeatcls, lingVal):
		"""
		描述:
			用于从地理信息中提取临近信息。临近信息的单位由数据本身所决定
		输入参数:
			ptsFeatcls: 点要素类的名称
			lingVal: lingValue的实例
		返回值:
			{ptId : proximity_ptId, ...}
				# Assign the values of parameters
		"""
		
		# Convert linguistic features to points
		path, name = os.path.split(self.inFeatcls_)
		oFeatCls = os.path.join(path, lingVal.FeatCls)
		
		ptsX, ptsY = build_value_lists(ptsFeatcls)
		ptsNearDists = {}
		
		studyArea = []
		
		# Extract the geographic information: distances for Vector data, ExtractValuesToPoints for Raster data
		if lingVal.FeatType == "Vector":
			featCls = FeatureClass(oFeatCls)
			# For each feature, update the nearest-distance record of every point.
			for feat in featCls.Features():
				# Point features
				if feat.ShapeTypeName() == Shape.Point:
					shpPt = Point.FromFeature(feat)
					# Compute the point-to-point distance and update each point's nearest distance
					for ptKey in ptsX.keys():
						dist = ((ptsX[ptKey] - shpPt.X) ** 2 + (ptsY[ptKey] - shpPt.Y) ** 2) ** 0.5
						ptsNearDists[ptKey] = min(dist, ptsNearDists.get(ptKey, dist))
				# Line and polygon features
				else:
					shpLine = Line.FromFeature(feat)
					# Split into segments and find each segment's nearest distance to the points.
					for seg in shpLine.OuteriorSegments():
						studyArea = [(pt.X, pt.Y) for pt in seg]
					
						# Fewer than 3 vertices: reverse and append so nearest_point can be used
						if len(studyArea) < 3:
							studyAreaRev = copy(studyArea)
							studyAreaRev.reverse()
							studyArea += studyAreaRev[1:]
						
						# Use the segment's nearest distances to update the global nearest distances
						segNearestDists = nearest_point(studyArea, ptsX, ptsY)[0]
						for ptKey in segNearestDists.keys():
							dist = segNearestDists[ptKey]
							ptsNearDists[ptKey] = min(dist, ptsNearDists.get(ptKey, dist))
							
		if lingVal.FeatType == "Raster":
			# Extract the raster values into a temporary feature class
			tmpExtractedFeatcls = os.path.join(path, 'tmp_'+ name)
			rasterFeatCls = os.path.join(os.path.dirname(path), lingVal.FeatCls)
			
			logger.WriteLog("Extracting Values To Points...")
			pGP.CheckOutExtension("Spatial")
			pGP.ExtractValuesToPoints_sa(ptsFeatcls, rasterFeatCls, tmpExtractedFeatcls, "INTERPOLATE")
			
			# Read the values back from the temporary feature class
			logger.WriteLog("Read Raster Values...")
			tmpFeatcls = FeatureClass(tmpExtractedFeatcls)
			ptsNearDists = dict([(feat.ID(), feat["RASTERVALU"]) for feat in tmpFeatcls.Features()])
			
			if pGP.Exists(tmpExtractedFeatcls):
				pGP.Delete_management(tmpExtractedFeatcls)
		
		return ptsNearDists
Example #43
0
	def getState(self):
		''' @fn : getState
			@brief : Return state of manager.
		'''	
		return copy(self.state)
Example #44
0
    def gen_dynEL_g(self):
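        # Gravity/potential term g(q) of the Euler-Lagrange dynamics: each
        # entry is dU/dq_i, the partial derivative of the potential energy U
        # with respect to generalized coordinate i (computed symbolically;
        # SR here is presumably SageMath's symbolic ring).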
        self.g2 = copy(zero_matrix(SR, self.dof, 1))
        for i in range(0, self.dof):
            self.g2[i, 0] = self.U.derivative(self.q[i, 0])
        return self
Example #45
0
def main():
    """ Main function to be run. """
    parser = argparse.ArgumentParser(
        description='Run the Guided Policy Search algorithm.')
    parser.add_argument('experiment', type=str, help='experiment name')
    parser.add_argument('-n',
                        '--new',
                        action='store_true',
                        help='create new experiment')
    parser.add_argument('-t',
                        '--targetsetup',
                        action='store_true',
                        help='run target setup')
    parser.add_argument('-r',
                        '--resume',
                        metavar='N',
                        type=int,
                        help='resume training from iter N')
    parser.add_argument('-p',
                        '--policy',
                        metavar='N',
                        type=int,
                        help='take N policy samples (for BADMM/MDGPS only)')
    parser.add_argument('-s',
                        '--silent',
                        action='store_true',
                        help='silent debug print outs')
    parser.add_argument('-q',
                        '--quit',
                        action='store_true',
                        help='quit GUI automatically when finished')
    args = parser.parse_args()

    exp_name = args.experiment
    resume_training_itr = args.resume
    test_policy_N = args.policy

    from gps import __file__ as gps_filepath
    gps_filepath = os.path.abspath(gps_filepath)
    gps_dir = '/'.join(str.split(gps_filepath, '/')[:-3]) + '/'
    exp_dir = gps_dir + 'experiments/' + exp_name + '/'
    hyperparams_file = exp_dir + 'hyperparams.py'

    if args.silent:
        logging.basicConfig(format='%(levelname)s:%(message)s',
                            level=logging.INFO)
    else:
        logging.basicConfig(format='%(levelname)s:%(message)s',
                            level=logging.DEBUG)

    if args.new:
        from shutil import copy

        if os.path.exists(exp_dir):
            sys.exit("Experiment '%s' already exists.\nPlease remove '%s'." %
                     (exp_name, exp_dir))
        os.makedirs(exp_dir)

        prev_exp_file = '.previous_experiment'
        prev_exp_dir = None
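        # Seed the new experiment from the previous one (recorded in
        # '.previous_experiment'); if that fails, fall back to writing a
        # stub hyperparams.py instead.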
        try:
            with open(prev_exp_file, 'r') as f:
                prev_exp_dir = f.readline()
            copy(prev_exp_dir + 'hyperparams.py', exp_dir)
            if os.path.exists(prev_exp_dir + 'targets.npz'):
                copy(prev_exp_dir + 'targets.npz', exp_dir)
        except IOError:
            with open(hyperparams_file, 'w') as f:
                f.write(
                    '# To get started, copy over hyperparams from another experiment.\n'
                    +
                    '# Visit rll.berkeley.edu/gps/hyperparams.html for documentation.'
                )
        with open(prev_exp_file, 'w') as f:
            f.write(exp_dir)

        exit_msg = ("Experiment '%s' created.\nhyperparams file: '%s'" %
                    (exp_name, hyperparams_file))
        if prev_exp_dir and os.path.exists(prev_exp_dir):
            exit_msg += "\ncopied from     : '%shyperparams.py'" % prev_exp_dir
        sys.exit(exit_msg)

    if not os.path.exists(hyperparams_file):
        sys.exit("Experiment '%s' does not exist.\nDid you create '%s'?" %
                 (exp_name, hyperparams_file))

    hyperparams = imp.load_source('hyperparams', hyperparams_file)
    if args.targetsetup:
        try:
            import matplotlib.pyplot as plt
            from gps.agent.ros.agent_ros import AgentROS
            from gps.gui.target_setup_gui import TargetSetupGUI

            agent = AgentROS(hyperparams.config['agent'])
            TargetSetupGUI(hyperparams.config['common'], agent)

            plt.ioff()
            plt.show()
        except ImportError:
            sys.exit('ROS required for target setup.')
    elif test_policy_N:
        import random
        import numpy as np
        import matplotlib.pyplot as plt

        seed = hyperparams.config.get('random_seed', 0)
        random.seed(seed)
        np.random.seed(seed)

        data_files_dir = exp_dir + 'data_files/'
        data_filenames = os.listdir(data_files_dir)
        algorithm_prefix = 'algorithm_itr_'
        algorithm_filenames = [
            f for f in data_filenames if f.startswith(algorithm_prefix)
        ]
        current_algorithm = sorted(algorithm_filenames, reverse=True)[0]
        current_itr = int(
            current_algorithm[len(algorithm_prefix):len(algorithm_prefix) + 2])

        gps = GPSMain(hyperparams.config)
        if hyperparams.config['gui_on']:
            test_policy = threading.Thread(target=lambda: gps.test_policy(
                itr=current_itr, N=test_policy_N))
            test_policy.daemon = True
            test_policy.start()

            plt.ioff()
            plt.show()
        else:
            gps.test_policy(itr=current_itr, N=test_policy_N)
    else:
        import random
        import numpy as np
        import matplotlib.pyplot as plt

        seed = hyperparams.config.get('random_seed', 0)
        random.seed(seed)
        np.random.seed(seed)

        gps = GPSMain(hyperparams.config, args.quit)
        if hyperparams.config['gui_on']:
            run_gps = threading.Thread(
                target=lambda: gps.run(itr_load=resume_training_itr))
            run_gps.daemon = True
            run_gps.start()

            plt.ioff()
            plt.show()
        else:
            gps.run(itr_load=resume_training_itr)
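
When gui_on is set, the training loop runs on a daemon thread so matplotlib can own the main thread. A stripped-down sketch of that pattern (the worker body is a placeholder):

import threading
import matplotlib.pyplot as plt

def worker():
    pass  # placeholder for a long-running job such as gps.run()

t = threading.Thread(target=worker)
t.daemon = True  # daemon threads are killed when the main process exits
t.start()

plt.ioff()       # interactive mode off
plt.show()       # block in the GUI mainloop while the worker runs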
Example #46
0
    # override default config
    dataset = args.data.upper()
    if dataset == 'PEMS03':
        config["id_filename"] = "data/PEMS03/PEMS03.txt"
        config["num_of_vertices"] = 358
    elif dataset == 'PEMS04':
        config["id_filename"] = None
        config["num_of_vertices"] = 307
    elif dataset == 'PEMS07':
        config["id_filename"] = None
        config["num_of_vertices"] = 883
    elif dataset == 'PEMS08':
        config["id_filename"] = None
        config["num_of_vertices"] = 170
    else:
        raise Exception(
            f'Input data is {args.data}; only PEMS03/04/07/08 are supported')
    config["adj_filename"] = f"data/{dataset}/{dataset}.csv"
    config["graph_signal_matrix_filename"] = f"data/{dataset}/{dataset}.npz"
    config["pearsonr_adj_filename"] = f"data/{dataset}/{dataset}_pearsonr.npz"
    arg_dict = copy(vars(args))
    for key, value in vars(args).items():
        if value is None:
            arg_dict.pop(key)
    config.update(arg_dict)

    print(json.dumps(config, sort_keys=True, indent=4))
    log_name = input('log_name:\n')
    wandb.init(project="GNN2", config=config, notes=log_name)
    train_QTable(config, log_name)
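
The copy(vars(args)) step above protects the argparse namespace while unset (None) flags are dropped, so CLI options override the config only when actually given. The same idiom in isolation (names here are illustrative):

from copy import copy

config = {"lr": 0.001, "epochs": 50}       # defaults, e.g. from a config file
args_dict = {"lr": 0.01, "epochs": None}   # e.g. vars(args) from argparse

overrides = copy(args_dict)                # copy so the original dict is untouched
for key, value in args_dict.items():
    if value is None:
        overrides.pop(key)                 # drop flags the user did not set
config.update(overrides)                   # -> {"lr": 0.01, "epochs": 50}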
Example #47
0
    def go_to_goal(self, goal=[None], to_goal=[None], offset=0.05):

        if goal[0] is None:
            goal = deepcopy(self.goal)
            self_goal = True
            rospy.loginfo("goal: " + str(goal))
            goal_off = deepcopy(self.goal_off)
            if self.sim:
                # update goal_off because ee_position changes
                goal_off = goal - offset * self.normalize(goal -
                                                          self.ee_position)

        else:
            self_goal = False
            if to_goal[0] is None:
                goal_off = goal - offset * self.normalize(goal -
                                                          self.ee_position)
            else:
                goal_off = goal - offset * self.normalize(to_goal)

        while numpy.linalg.norm(goal_off - self.ee_position
                                ) > 0.01 and not rospy.is_shutdown():
            rospy.loginfo_throttle(0.2, "goal_off: " + str(goal_off))
            # print("ee_position: " + str(self.ee_position))
            #print("ee_orientation: " + str(self.ee_orientation))
            if (self_goal):  # means servo'ing to a dynamic target
                goal = deepcopy(self.goal)
                goal_off = deepcopy(self.goal_off)
                if self.sim:
                    # update goal_off because ee_position changes
                    goal_off = goal - offset * self.normalize(goal -
                                                              self.ee_position)
                self.recovery_trajectory.append(copy(self.manipulator_joints))

            rospy.loginfo_throttle(
                0.2, "ee distance from apple: " +
                str(numpy.linalg.norm(self.ee_position - goal)))
            rospy.loginfo_throttle(
                0.2, "[distance calc] ee_position: " + str(self.ee_position))
            rospy.loginfo_throttle(0.2, "[distance calc] goal: " + str(goal))

            if numpy.linalg.norm(
                    self.ee_position - self.goal
            ) < 0.3:  # 0.2m min range on sr300 and +0.1 to account for camera frame offset from EE
                rospy.loginfo_throttle(
                    0.2,
                    "disabling updating of apple position because too close")
                if not self.sim:
                    self.enable_bridge_pub.publish(Bool(False))
            else:
                if not self.sim:
                    self.enable_bridge_pub.publish(Bool(True))

            # check if joints are outside the limits
            # joints = self.arm.joint_angles()
            # joints = joints.values()[::-1]  # need to reverse because method's ordering is j6-j0

            if not self.is_in_joint_limits():
                return False
                # sys.exit()

            des_vel_t = self.K_V * (goal_off - self.ee_position)

            if to_goal[0] is not None:
                des_omega = -self.K_VQ * self.get_angular_velocity([None],
                                                                   to_goal)
            else:
                des_omega = -self.K_VQ * self.get_angular_velocity(goal)

            #print("omega: " + str(des_omega))

            #des_omega = numpy.array([0.0, 0.0, 0.0])

            des_vel = numpy.append(des_vel_t, des_omega)

            #print "goal: ", self.goal, " off: ", self.goal_off
            #print "des_vel: ", des_vel

            #singularity check
            if not self.is_greater_min_manipulability():
                return False
            else:
                joint_vel = self.compute_joint_vel(des_vel)
                # print("joint_vel: " + str(joint_vel))
                if self.sim:
                    msg = JointTrajectoryPoint()
                    msg.velocities = joint_vel
                    self.sim_joint_velocities_pub.publish(msg)
                else:
                    cmd = self.arm.joint_velocities()
                    cmd = dict(zip(cmd.keys(), joint_vel[::-1]))
                    self.arm.set_joint_velocities(cmd)

            # rospy.sleep(0.1)

        self.stop_arm()
        return True
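
The loop above servos the end effector toward goal_off, a point backed off from the goal along the approach direction, using a proportional velocity law. The two core computations in isolation (the gain and positions are made-up values):

import numpy

def normalize(v):
    return v / numpy.linalg.norm(v)

K_V = 1.5                                   # proportional gain (illustrative)
offset = 0.05                               # stand-off distance in metres
goal = numpy.array([0.6, 0.1, 0.4])
ee_position = numpy.array([0.2, 0.0, 0.3])

# back the target off 5 cm from the goal along the approach direction
goal_off = goal - offset * normalize(goal - ee_position)

# proportional translational velocity command toward the offset goal
des_vel_t = K_V * (goal_off - ee_position)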
Example #48
0
                sample_id_final[ci] = 0
            else:
                sample_id_final[ci] = tmp[0]

        loc = np.argwhere(sample_id_final == 0)
        tmp = sample_id_final[loc]
        sample_id_final[loc] = sample_id_final[0]
        sample_id_final[0] = tmp
        num_valid_sample = 7
        face_data = face_data[sample_id_final, :, :, :]
        face_normal = face_normal[sample_id_final, :, :, :]

    input_data = face_normal

    sz_input_data = input_data.shape
    input_data_sample = copy(input_data)
    input_data = np.repeat(
        input_data[0:1, :, :, :], branch_num, axis=0
    )  # the first example stores the well-aligned training data; the others are neighbors

    generator.net.blobs['sto_code1'].data[...] = code1
    generator.net.blobs['sto_code2'].data[...] = code2

    generator.net.blobs['data1'].data[:, :, 0:128, 0:128] = input_data
    generator.net.blobs['data2'].data[...] = (
        generator.net.blobs['data1'].data[:, :, 0::2, 0::2] +
        generator.net.blobs['data1'].data[:, :, 0::2, 1::2] +
Example #49
0
 def get_start(self):
     """Returns the start time of the series as defined in the header.

     Returns
     -------
     A string of the start time
     """
     return copy(self.start)
Example #50
0
def BestFirstSolver(c,d,A,G,b):
	
	xSize = size(c)
	ySize = size(d)
	consSize = len(A)
		
	OriginalProb = LpProblem("OrigProb",LpMinimize)
	x = LpVariable.dicts("x", range(xSize),  cat="Continuous")
	y = LpVariable.dicts("y", range(ySize),  cat="Continuous")
	
	# Formulating the objective function
	Obj1 = LpAffineExpression([x[i],c[i]] for i in range(xSize))
	Obj2 = LpAffineExpression([y[i],d[i]] for i in range(ySize))
	Objective = Obj1 + Obj2
	OriginalProb+= Objective
	
	#Adding the original constraints
	for i in range(consSize):
		Const = LpAffineExpression([x[j],A[i][j]] for j in range(xSize))
		Const = Const + LpAffineExpression([y[j],G[i][j]] for j in range(ySize))
		OriginalProb+= Const <= b[i]
	
	
	liveNodes = dict()
	ProbHeap = PriorityQueue()
	RootNode = LPNode('2')
	liveNodes['2'] = RootNode
	OriginalProb.solve(cbc_solver)
	variables = OriginalProb.variables()
	RootNode.sol(variables,LpStatus[OriginalProb.status],value(OriginalProb.objective))
	ProbHeap.put((value(OriginalProb.objective),RootNode))
	
	BestSol = float("inf")
	currBestSol = None
	while not ProbHeap.empty():
		
		currNode = ProbHeap.get()[1]
		currCode = currNode.code
		currNode.status = 2

		optVal = currNode.optVal
		
		if currNode.isSolved!='Infeasible' and currNode.isSolved!='Unbounded' and BestSol > optVal:
			for v in currNode.variables:
				if v.name[0] == 'y' and v.varValue != floor(v.varValue):
					currNode.status = 0
					branchVar = int(v.name[2:])
					branchPoint = floor(v.varValue)
					
		if currNode.status == 2 and currNode.isSolved=='Optimal' and BestSol > optVal:
			# rebuild this node's LP (original problem plus the node's extra
			# constraints) so the incumbent is a problem we own; NewProb is
			# not yet defined on the first pass through the loop
			currBestSol = OriginalProb.copy()
			for i in range(size(currNode.AddlC)):
				currBestSol += currNode.AddlC[i] <= currNode.AddlB[i]
			BestSol = optVal
			
		if currNode.status == 0:
			newNode = LPNode(currCode+'0')
			AddlC = copy(currNode.AddlC)
			AddlB = copy(currNode.AddlB)
			newConst = y[branchVar]
			AddlC.append(newConst)
			AddlB.append(branchPoint)
			newNode.add_Constraint(AddlC,AddlB)
			NewProb = OriginalProb.copy()
			
			if size(AddlC) != 0: 
				for i in range(size(AddlC)):
					NewProb+= AddlC[i] <= AddlB[i]
			
			NewProb.solve(cbc_solver)
			variables = NewProb.variables()
			newNode.sol(variables,LpStatus[NewProb.status],value(NewProb.objective))
			ProbHeap.put((value(NewProb.objective),newNode))  # key on the child's bound, not the root's
						
			newNode = LPNode(currCode+'1')
			AddlC = copy(currNode.AddlC)
			AddlB = copy(currNode.AddlB)
			newConst = -y[branchVar]
			AddlC.append(newConst)
			AddlB.append(-branchPoint - 1)
			newNode.add_Constraint(AddlC,AddlB)
			NewProb = OriginalProb.copy()
			
			if size(AddlC) != 0: 
				for i in range(size(AddlC)):
					NewProb+= AddlC[i] <= AddlB[i]
			
			NewProb.solve(cbc_solver)
			variables = NewProb.variables()
			newNode.sol(variables,LpStatus[NewProb.status],value(NewProb.objective))
			ProbHeap.put((value(NewProb.objective),newNode))  # key on the child's bound, not the root's
			
			
		
		
	if currBestSol is None:
		return Soln('Infeasible or Unbounded',[],[],float("inf"))
		
	else:
		currBestSol.solve(cbc_solver)
		ySol = dict()
		xSol = dict()
		for v in currBestSol.variables():
				if v.name[0] == 'y':
					ySol[v.name[2:]] = v.varValue
				if v.name[0] == 'x':
					xSol[v.name[2:]] = v.varValue
		
		print value(currBestSol.objective)
		return Soln('Solved', xSol, ySol, value(currBestSol.objective))
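
The function above is best-first branch and bound: a priority queue ordered by each relaxation's objective, branching on a fractional y variable. A compact sketch of that control flow, with solve_relaxation and branch supplied by the caller as placeholders:

import heapq

def best_first_bnb(root, solve_relaxation, branch):
    # solve_relaxation(node) -> (bound, feasible, integral)
    # branch(node)           -> iterable of child nodes
    best_val, best_node = float("inf"), None
    tiebreak = 0
    heap = [(solve_relaxation(root)[0], tiebreak, root)]
    while heap:
        bound, _, node = heapq.heappop(heap)
        if bound >= best_val:
            continue                      # pruned by the incumbent
        _, feasible, integral = solve_relaxation(node)
        if not feasible:
            continue
        if integral:                      # new incumbent found
            best_val, best_node = bound, node
            continue
        for child in branch(node):
            child_bound, child_feasible, _ = solve_relaxation(child)
            if child_feasible and child_bound < best_val:
                tiebreak += 1             # keeps heap tuples comparable
                heapq.heappush(heap, (child_bound, tiebreak, child))
    return best_val, best_node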
Example #51
0
   >>> spam
   [0, 'Hello!', 2, 3, 4, 5]
   >>> cheese # The cheese variable refers to the same list.
   [0, 'Hello!', 2, 3, 4, 5]


All values in Python have a unique identity that can be obtained with the id() function

>>> bacon = 'Hello'
>>> id(bacon)
44491136
>>> bacon += ' world!' # A new string is made from 'Hello' and ' world!'.
>>> id(bacon) # bacon now refers to a completely different string.
44609712

The copy Module’s copy() and deepcopy() Functions
-------------------------------------------------
+ copy.copy() can be used to make a duplicate copy of a mutable value like a list
or dictionary

>>> import copy
>>> spam = ['A', 'B', 'C', 'D']
>>> id(spam)
44684232
>>> cheese = copy.copy(spam)
>>> id(cheese) # cheese is a different list with different identity.
44685832
>>> cheese[1] = 42
>>> spam
['A', 'B', 'C', 'D']
>>> cheese
['A', 42, 'C', 'D']
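
copy.copy() duplicates only the outer container; nested mutable objects are still shared, which is what copy.deepcopy() is for:

>>> import copy
>>> spam = ['A', ['B', 'C']]
>>> cheese = copy.copy(spam)
>>> spam[1].append('D')  # mutate the shared inner list
>>> cheese
['A', ['B', 'C', 'D']]
>>> copy.deepcopy(spam)[1] is spam[1]
False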
Example #52
0
def DFSSolver(c,d,A,G,b):
	
	xSize = size(c)
	ySize = size(d)
	consSize = len(A)
		
	OriginalProb = LpProblem("OrigProb",LpMinimize)
	x = LpVariable.dicts("x", range(xSize),  cat="Continuous")
	y = LpVariable.dicts("y", range(ySize),  cat="Continuous")
	
	# Formulating the objective function
	Obj1 = LpAffineExpression([x[i],c[i]] for i in range(xSize))
	Obj2 = LpAffineExpression([y[i],d[i]] for i in range(ySize))
	Objective = Obj1 + Obj2
	OriginalProb+= Objective
	
	#Adding the original constraints
	for i in range(consSize):
		Const = LpAffineExpression([x[j],A[i][j]] for j in range(xSize))
		Const = Const + LpAffineExpression([y[j],G[i][j]] for j in range(ySize))
		OriginalProb+= Const <= b[i]
	
	
	liveNodes = dict()
	ProbStack = []
	RootNode = LPNode('2')
	liveNodes['2'] = RootNode
	ProbStack.append(RootNode)
	BestSol = float("inf")
	currBestSol = None
	#problem stack initially contains the root node
	while size(ProbStack) > 0:
		
		currNode = ProbStack.pop()
		currCode = currNode.code
		#newProb contains a shallow copy of the original problem
		NewProb = OriginalProb.copy()
		
		
		#adding a new constraint to the original problem
		if size(currNode.AddlC) != 0: 
			for i in range(size(currNode.AddlC)):
				NewProb+= currNode.AddlC[i] <= currNode.AddlB[i]
		

		NewProb.solve(cbc_solver)
		#currNode.status represents whether the node has been visited or not:
		#0 - Not Visited
		#1 - Visited but not completed
		#2 - Completed
		currNode.status = 2
		count = 0
		optVal = value(NewProb.objective)
		if LpStatus[NewProb.status]!='Infeasible' and LpStatus[NewProb.status]!='Unbounded' and BestSol > optVal:
			#add heuristic for node selection here
			fracvariables = []
			count = count + 1
			k = 1
			for v in NewProb.variables():
				if v.name[0] == 'y' and v.varValue != floor(v.varValue):
					currNode.status = 0
					branchVar = int(v.name[2:])
					branchPoint = floor(v.varValue)
					if len(fracvariables) < k :
						fracvariables.append([branchVar, branchPoint])
					else:
						break
					#no break yet: keep scanning to collect up to k fractional candidates
					
		if currNode.status == 2 and LpStatus[NewProb.status]=='Optimal' and BestSol > optVal:
			currBestSol = NewProb.copy()
			BestSol = optVal
			
		
		if currNode.status == 0:
			bestVar = 0
			bestBranchPoint = 0
			optvalsofar = float("inf")
			for tempnode in fracvariables:
				branchVar = tempnode[0]
				branchPoint = tempnode[1]
				newNode = LPNode(currCode+'0')
				AddlC = copy(currNode.AddlC)
				AddlB = copy(currNode.AddlB)
				newConst = y[branchVar]
				AddlC.append(newConst)
				AddlB.append(branchPoint)
				newNode.add_Constraint(AddlC,AddlB)
				# ProbStack.append(newNode)
				NewProb2 = OriginalProb.copy()
				#adding a new constraint to the original problem
				if size(newNode.AddlC) != 0: 
					for i in range(size(newNode.AddlC)):
						NewProb2 += newNode.AddlC[i] <= newNode.AddlB[i]

				NewProb2.solve(cbc_solver)
				optVal2 = value(NewProb2.objective)
				
				if LpStatus[NewProb2.status]!='Infeasible' and LpStatus[NewProb2.status]!='Unbounded' and optVal2 < optvalsofar:
					bestVar = branchVar
					bestBranchPoint = branchPoint
					optvalsofar = optVal2  # track the best bound seen so far

			branchVar = bestVar
			branchPoint = bestBranchPoint
			newNode = LPNode(currCode+'0')
			AddlC = copy(currNode.AddlC)
			AddlB = copy(currNode.AddlB)
			newConst = y[branchVar]
			AddlC.append(newConst)
			AddlB.append(branchPoint)
			newNode.add_Constraint(AddlC,AddlB)
			ProbStack.append(newNode)
			
			newNode = LPNode(currCode+'1')
			AddlC = copy(currNode.AddlC)
			AddlB = copy(currNode.AddlB)
			newConst = -y[branchVar]
			AddlC.append(newConst)
			AddlB.append(-branchPoint - 1)
			newNode.add_Constraint(AddlC,AddlB)
			ProbStack.append(newNode)
				#once a new problem is created solve it
		
	if currBestSol is None:
		return Soln('Infeasible or Unbounded',[],[],float("inf"))
		
	else:
		currBestSol.solve(cbc_solver)
		ySol = dict()
		xSol = dict()
		for v in currBestSol.variables():
				if v.name[0] == 'y':
					ySol[v.name[2:]] = v.varValue
				if v.name[0] == 'x':
					xSol[v.name[2:]] = v.varValue
		print value(currBestSol.objective)
		print ySol
		return Soln('Solved', xSol, ySol, value(currBestSol.objective))
Example #53
0
        if s % d == 0:
            return True
    return False


res = []
while len(res) < m:
    good = False
    while not good:
        good = True
        for k in range(2, 11):
            good = good and check(a, k)
        if good:
            break
        next(a)
    res.append(copy(a))
    print len(res), a
    next(a)

with open("out.txt", "w") as f:
    f.write("Case #1:\n")
    for a in res:
        f.write("".join(map(str, a)))
        for k in range(2, 11):
            s = 0
            for c in a:
                s = s * k + c
            for d in divs:
                if s % d == 0:
                    f.write(" %d" % d)
                    break
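
The inner loop s = s * k + c is Horner's rule: it interprets the digit list as an integer in base k. Isolated for clarity:

def value_in_base(digits, k):
    # Horner's rule: fold the digit list into a base-k integer
    s = 0
    for c in digits:
        s = s * k + c
    return s

value_in_base([1, 0, 1], 2)  # -> 5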
Example #54
0
def main():

    # INPUT ARGUMENTS #

    parser = argparse.ArgumentParser(
        description='Run the Guided Policy Search algorithm.')
    parser.add_argument('-e',
                        '--experiment',
                        type=str,
                        default='box2d_arm_example',
                        help='experiment name')
    parser.add_argument('-n',
                        '--new',
                        action='store_true',
                        help='create new experiment')
    parser.add_argument('-t',
                        '--targetsetup',
                        action='store_true',
                        help='run target setup')
    parser.add_argument('-r',
                        '--resume',
                        metavar='N',
                        type=int,
                        help='resume training from iter N')
    parser.add_argument('-p',
                        '--policy',
                        metavar='N',
                        type=int,
                        help='take N policy samples (for BADMM/MDGPS only)')
    parser.add_argument('-s',
                        '--silent',
                        action='store_true',
                        help='silent debug print outs')
    parser.add_argument('-q',
                        '--quit',
                        action='store_true',
                        help='quit GUI automatically when finished')
    args = parser.parse_args()

    # INPUT VARIABLES #

    exp_name = args.experiment  # experiment name
    resume_training_itr = args.resume  # iteration from which to resume training
    test_policy_N = args.policy  # number of policy samples to take

    # FILE-PATHS #

    from gps import __file__ as gps_filepath  # set 'gps_filepath' as root gps filepath
    gps_filepath = os.path.abspath(gps_filepath)  # reformat as absolute path
    gps_dir = '/'.join(str.split(
        gps_filepath, '/')[:-3]) + '/'  # remove 'gps' part, for root directory
    exp_dir = gps_dir + 'experiments/' + exp_name + '/'  # create experiment directory
    hyperparams_file = __file__[:-7] + 'hyperparams.py'  # complete path to hyperparameter file

    # LOGGING OPTION #

    if args.silent:
        logging.basicConfig(format='%(levelname)s:%(message)s',
                            level=logging.INFO)
    else:
        logging.basicConfig(format='%(levelname)s:%(message)s',
                            level=logging.DEBUG)

    if args.new:  # if new experiment desired
        from shutil import copy  # import file copy

        if os.path.exists(exp_dir):  # if already exists
            sys.exit("Experiment '%s' already exists.\nPlease remove '%s'." %
                     (exp_name, exp_dir))  # exit from python
        os.makedirs(exp_dir)  # else mkdir

        prev_exp_file = '.previous_experiment'  # hidden file in the python gps directory, left by a previous run
        prev_exp_dir = None
        try:  # attempt following code
            with open(prev_exp_file, 'r') as f:
                prev_exp_dir = f.readline(
                )  # read previous experiment directory from hidden file
            copy(prev_exp_dir + 'hyperparams.py',
                 exp_dir)  # copy over hyperparameters from previous exp run
            if os.path.exists(
                    prev_exp_dir +
                    'targets.npz'):  # if target numpy array file exists
                copy(prev_exp_dir + 'targets.npz',
                     exp_dir)  # copy to new experiment directory
        except IOError as e:  # anything other than IOError propagates and terminates the program
            with open(hyperparams_file, 'w') as f:
                f.write(
                    '# To get started, copy over hyperparams from another experiment.\n'
                    +
                    '# Visit rll.berkeley.edu/gps/hyperparams.html for documentation.'
                )
                # if hyperparams were not copied over, instruct user on how to get started
        with open(prev_exp_file, 'w') as f:
            f.write(
                exp_dir
            )  # regardless of whether existed before, write new prev_exp hidden file

        exit_msg = ("Experiment '%s' created.\nhyperparams file: '%s'" %
                    (exp_name, hyperparams_file))  # base output message
        if prev_exp_dir and os.path.exists(prev_exp_dir):
            exit_msg += "\ncopied from     : '%shyperparams.py'" % prev_exp_dir
        sys.exit(exit_msg)  # if hyperparam file copied, also state where from
        # Finally, exit process, new experiment has been created, can now run again without '-n' argument

    if not os.path.exists(hyperparams_file):
        sys.exit("Experiment '%s' does not exist.\nDid you create '%s'?" %
                 (exp_name, hyperparams_file)
                 )  # if no hyperparams file, prompt to create one, and exit

    hyperparams = imp.load_source(
        'hyperparams',
        hyperparams_file)  # import hyperparams from hyperparam file

    if args.targetsetup:  # if target setup GUI option selected (for ROS only)
        try:
            import matplotlib.pyplot as plt
            from gps.agent.ros.agent_ros import AgentROS
            from gps.gui.target_setup_gui import TargetSetupGUI

            agent = AgentROS(hyperparams.config['agent'])
            TargetSetupGUI(hyperparams.config['common'], agent)

            plt.ioff()
            plt.show()
        except ImportError:
            sys.exit('ROS required for target setup.')
    elif test_policy_N:  # if testing current policy, how many policy samples to take at given iteration
        import random
        import numpy as np
        import matplotlib.pyplot as plt

        seed = hyperparams.config.get(
            'random_seed', 0)  # retrieve random_seed value from hyperparams
        random.seed(
            seed
        )  # initialize internal state of random num generator with fixed seed
        np.random.seed(
            seed
        )  # initialize internal state of numpy random num generator with fixed seed

        data_files_dir = exp_dir + 'data_files/'  # data file dir
        data_filenames = os.listdir(data_files_dir)  # all data files
        algorithm_prefix = 'algorithm_itr_'
        algorithm_filenames = [
            f for f in data_filenames if f.startswith(algorithm_prefix)
        ]  # all algorithm iteration files
        current_algorithm = sorted(
            algorithm_filenames,
            reverse=True)[0]  # current algorithm iteration filename
        current_itr = int(
            current_algorithm[len(algorithm_prefix):len(algorithm_prefix) +
                              2])  # current iteration number

        gps = GPSMain(hyperparams.config)  # initialise GPSMain object
        if hyperparams.config['gui_on']:
            test_policy = threading.Thread(target=lambda: gps.test_policy(
                itr=current_itr, N=test_policy_N
            ))  # define thread target (what is called on 'start' command)
            test_policy.daemon = True  # daemon threads are killed automatically on program termination
            test_policy.start()  # start thread process

            plt.ioff()  # turn interactive mode off
            plt.show()  # start mainloop for displaying plots
        else:
            gps.test_policy(
                itr=current_itr, N=test_policy_N
            )  # else, no separate thread needed, start process as normal
    else:
        import random
        import numpy as np
        import matplotlib.pyplot as plt

        seed = hyperparams.config.get(
            'random_seed', 0)  # retrieve random_seed value from hyperparams
        random.seed(
            seed
        )  # initialize internal state of random num generator with fixed seed
        np.random.seed(
            seed
        )  # initialize internal state of numpy random num generator with fixed seed

        gps = GPSMain(hyperparams.config,
                      args.quit)  # initialise GPSMain object
        if hyperparams.config['gui_on']:
            run_gps = threading.Thread(
                target=lambda: gps.run(itr_load=resume_training_itr)
            )  # define thread target (what is called on 'start' command)
            run_gps.daemon = True  # daemon threads are killed automatically on program termination
            run_gps.start()  # start thread process

            plt.ioff()  # turn interactive mode off
            plt.show()  # start mainloop for displaying plots
        else:
            gps.run(
                itr_load=resume_training_itr
            )  # else, no separate thread needed, start process as normal
Example #55
0
    c = copy.copy(alist)
    print(alist)
    print(c)  #[1, 2, 3, ['a', 'b']]        [1, 2, 3, ['a', 'b']]
    alist.append(5)
    print(alist)  #[1, 2, 3, ['a', 'b'], 5]
    print(c)  #[1, 2, 3, ['a', 'b']]
    alist[3]  #['a', 'b']
    alist[3].append('cccc')
    # the nested child object was changed
    print(alist)  #[1, 2, 3, ['a', 'b', 'cccc'], 5]
    print(c)  #[1, 2, 3, ['a', 'b', 'cccc']]


def deepcopy():  # deep copy: nested objects are copied too, so changes to the original never affect any child element of the deep copy
    d = copy.deepcopy(alist)
    print(alist)  #[1, 2, 3, ['a', 'b']]
    print(d)  #[1, 2, 3, ['a', 'b']] unchanged throughout
    alist.append(5)
    print(alist)  # [1, 2, 3, ['a', 'b'], 5]
    print(d)  # [1, 2, 3, ['a', 'b']] unchanged throughout
    alist[3]  #['a', 'b']
    alist[3].append("ccccc")
    print(alist)  # [1, 2, 3, ['a', 'b', 'ccccc'], 5]
    print(d)  # [1, 2, 3, ['a', 'b']] unchanged throughout


if __name__ == '__main__':
    copy()
    copy2()
    deepcopy()
Example #56
0
 def copy(self):
   """Returns a shallow copy of this object."""
   return copy(self)
Example #57
0
 def fork(self, global_node):
     copy(target=self.weight, source=global_node.weight)
Example #58
0
def LogDistMatrix(distm):
    tmpMatrix = copy(distm)
    # clamp entries below 1/e up to 1/e so the log is bounded below by -1
    np.putmask(tmpMatrix, distm < 1 / np.e, 1 / np.e)
    return np.log(tmpMatrix)
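
A quick check of the clamping behaviour (the numpy and copy imports are assumed by the fragment above; values are illustrative):

import numpy as np
from copy import copy

d = np.array([[0.05, 2.0], [3.0, 0.1]])
# entries below 1/e (about 0.368) come out as exactly -1 after the log
LogDistMatrix(d)  # -> [[-1.0, 0.693...], [1.098..., -1.0]]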
Example #59
0
 def __deepcopy__(self, memo):
     '''Similar to copy; deepcopy hooks must accept the memo dict'''
     return copy(self)
Example #60
0
def automatedMove(CB, player):
    if player == "black":
        opposingPlayer = "red"
    else:
        opposingPlayer = "black"
    possibles = getPossibles(CB, player)
    temp = []
    maxIndex = 0
    if len(possibles["jumps"]) > 0:
        for index in range(len(possibles["jumps"])):
            if len(possibles["jumps"][index]) > len(
                    possibles["jumps"][maxIndex]):
                maxIndex = index
        if len(possibles["jumps"][maxIndex]) > 5:
            if DEBUG:
                print("returning long jump ", possibles["jumps"][maxIndex])
                junk = input()
            return possibles["jumps"][maxIndex]
        else:
            for block in possibles["blocks"]:
                if block in possibles["jumps"]:
                    temp.append(block)
            if len(temp) > 0:
                index = random.randint(0, len(temp) - 1)
                if DEBUG:
                    print("returning jump that is a block ", temp[index])
                    junk = input()
                return temp[index]
        index = random.randint(0, len(possibles["jumps"]) - 1)
        if DEBUG:
            print("returning random jump ", possibles["jumps"][index])
            junk = input()
        return possibles["jumps"][index]

    if len(possibles["blocks"]) > 0:
        index = random.randint(0, len(possibles["blocks"]) - 1)
        if DEBUG:
            print("returning block ", possibles["blocks"][index])
            junk = input()
        return possibles["blocks"][index]

    if len(possibles["crownings"]) > 0:
        index = random.randint(0, len(possibles["crownings"]) - 1)
        if DEBUG:
            print("returning crowning ", possibles["crownings"][index])
            junk = input()
        return possibles["crownings"][index]

    if len(possibles["moves"]) > 0:
        # can the enemy jump me on the next turn?
        enemyP = getPossibles(CB, opposingPlayer)
        if len(enemyP["jumps"]) > 0:
            avoid = []
            for jump in enemyP["jumps"]:
                for move in possibles["moves"]:
                    if move[3:5] == jump[3:5]:
                        avoid.append(move)
            if len(avoid) > 0:
                index = random.randint(0, len(avoid) - 1)
                return avoid[index]
        if DEBUG:
            print("MOVES:", possibles["moves"])
        testSet = copy(possibles["moves"])
        for move in testSet:
            if DEBUG:
                print("Testing move ", move)
            copyCB = deepcopy(CB)
            fromRow = ord(move[0]) - 65
            fromCol = int(move[1])
            toRow = ord(move[3]) - 65
            toCol = int(move[4])
            t = copyCB[fromRow][fromCol]
            copyCB[fromRow][fromCol] = 0
            copyCB[toRow][toCol] = t
            enemyP = getPossibles(copyCB, opposingPlayer)
            if DEBUG:
                print("ENEMY CAN JUMP THESE ", enemyP["jumps"])
            if len(enemyP["jumps"]) > 0:
                if DEBUG:
                    print("Removing bad move ", move)
                possibles["moves"].remove(move)
                if DEBUG:
                    print("possibles after removal ", possibles["moves"])
                    junk = input()
        if DEBUG:
            print("SAFE MOVES = ", possibles["moves"])
        preferredMoves = []
        if len(possibles["moves"]) > 0:
            for move in possibles["moves"]:
                if int(move[4]) == 0 or int(move[4]) == 7:
                    preferredMoves.append(move)
            if len(preferredMoves) > 0:
                minDist = 100
                bestMove = preferredMoves[0]
                for eachMove in preferredMoves:
                    centroids = centroid(CB, opposingPlayer)
                    dist = abs((ord(eachMove[0]) - 65) - centroids[0]) + abs(
                        int(eachMove[1]) - centroids[1])
                    if dist < minDist:
                        bestMove = eachMove
                        minDist = dist
                #index=random.randint(0,len(preferredMoves)-1)
                index = preferredMoves.index(bestMove)
                if DEBUG:
                    print(
                        "PREFERRED move that does not put me into jumpable position:",
                        preferredMoves[index])
                    junk = input()
                return preferredMoves[index]
            else:
                index = random.randint(0, len(possibles["moves"]) - 1)
                if DEBUG:
                    print(
                        "Random move that does not put me into jumpable position:",
                        possibles["moves"][index])
                    junk = input()
                return possibles["moves"][index]
        else:
            possibles = getPossibles(CB, player)
            index = random.randint(0, len(possibles["moves"]) - 1)
            if DEBUG:
                print("randomly picking move ", possibles["moves"][index])
                junk = input()
            return possibles["moves"][index]