Example #1
0
    def generatePacket(self, N = 0):
        if N == 0:
            N = int(round(n.rand()*290)*2) # max length is two 
        src = int(round(n.rand() * (2**6-1)))
        typ = int(round(n.rand() * (2**2-1)))
        
        id = self.ids[typ, src]
        
        self.ids[typ, src] = self.ids[typ, src] + 1
        
        
        data = n.zeros(N+6, dtype=n.uint8)

        data[0] = (id >> 24) & 0xFF
        data[1] = (id >> 16) & 0xFF
        data[2] = (id >> 8) & 0xFF
        data[3] = (id) & 0xFF
        data[4] = typ
        data[5] = src

        # random data
        for i in range(N):
            data[i+6] = int(n.rand() * 255)

        return (src, typ, data)
Example #2
0
def epsilon_greedy(Q,
                   state,
                   all_actions,
                   current_total_steps=0,
                   epsilon_initial=1,
                   epsilon_final=0.2,
                   anneal_timesteps=10000,
                   eps_type="constant"):

    if eps_type == 'constant':
        epsilon = epsilon_final
        # ADD YOUR CODE SNIPPET BETWEEN EX 3.1
        # Implement the epsilon-greedy algorithm for a constant epsilon value
        # Use epsilon and all input arguments of epsilon_greedy you see fit
        # It is recommended you use the np.random module
        if np.random.rand() > epsilon:
            # exploit: take the greedy action for the current state
            action = np.argmax(Q[state])
        else:
            # explore: pick an action uniformly at random
            action = np.random.choice(all_actions)
        # ADD YOUR CODE SNIPPET BETWEEN EX 3.1

    elif eps_type == 'linear':
        # ADD YOUR CODE SNIPPET BETWEEN EX 3.2
        # Implement the epsilon-greedy algorithm for a linear epsilon value
        # Use epsilon and all input arguments of epsilon_greedy you see fit
        # use the ScheduleLinear class
        # It is recommended you use the np.random module
        action = None
        # ADD YOUR CODE SNIPPET BETWEEN EX 3.2

    else:
        raise ValueError("Epsilon greedy type unknown")

    return action
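One way the 'linear' branch could be completed, shown as a hedged sketch rather than the course's reference solution; the ScheduleLinear helper mentioned in the comments is not shown above, so its interface here is an assumption:

import numpy as np

class ScheduleLinear:
    """Hypothetical linear annealing schedule (the real class is not shown in the example)."""
    def __init__(self, anneal_timesteps, epsilon_final, epsilon_initial=1.0):
        self.anneal_timesteps = anneal_timesteps
        self.epsilon_final = epsilon_final
        self.epsilon_initial = epsilon_initial

    def value(self, t):
        # fraction of the annealing period that has elapsed, clipped to [0, 1]
        frac = min(float(t) / self.anneal_timesteps, 1.0)
        return self.epsilon_initial + frac * (self.epsilon_final - self.epsilon_initial)

def epsilon_greedy_linear(Q, state, all_actions, current_total_steps,
                          epsilon_final=0.2, anneal_timesteps=10000):
    epsilon = ScheduleLinear(anneal_timesteps, epsilon_final).value(current_total_steps)
    if np.random.rand() > epsilon:
        return np.argmax(Q[state])            # exploit: greedy action
    return np.random.choice(all_actions)      # explore: uniform random action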
Example #3
0
def test__as_json(test_client):
    sample_document = {
        "val": np.random.rand(20),
        "val_2": np.random.rand(100)
    }
    sample_document_result = test_client._as_json(sample_document)
    assert sample_document_result == sample_document
Example #4
0
def load_array(input_config: Dict, prefix=None, shape=None):
    """
    Load array from file or list into numpy array.
    :param input_config: External data input file config.
    :param prefix: Additional path to search for input file.
    :return: Data stored in a numpy array.
    """
    # get path to either source file or direct to the embedded array
    data = input_config["data"]
    dtype = input_config["data_type"].type
    if isinstance(data, str):  # source file or autogenerated
        m = re.match(r"([^:]+):(.+)", data)
        if m:
            is_scalar = ("input_dims" in input_config
                         and input_config["input_dims"] is not None
                         and len(input_config["input_dims"]) <= 0)
            if shape is None and not is_scalar:
                raise ValueError(
                    "Must provide shape when using generated inputs")
            if m.group(1) == "constant":
                val = float(m.group(2))
                if is_scalar:
                    return val
                else:
                    arr = np.empty(shape, dtype=dtype)
                    arr[:] = val
                    return arr
            elif m.group(1) == "random":
                m1 = re.match(r"([0-9\.]+).+([0-9\.]+)", m.group(2))
                rand_min = float(m1.group(1))
                rand_max = float(m1.group(2))
                if is_scalar:
                    return (rand_min + (rand_max - rand_min) * np.random.rand(1))[0]
                else:
                    return rand_min + (rand_max - rand_min) * np.random.rand(*shape)
            else:
                raise ValueError("Unknown generation: " + m.group(1))
        path = data
        if not os.path.isfile(path):
            if prefix is not None:
                path = os.path.join(prefix, path)
            if not os.path.isfile(path):
                raise FileNotFoundError("File {} does not exist.".format(data))
        if path.endswith(".csv"):
            return np.genfromtxt(path, dtype, delimiter=',')
        elif path.endswith(".dat"):
            return np.fromfile(path, dtype)
        else:
            raise ValueError("Invalid file type: " + path)
    elif shape is not None and len(shape) == 0:
        return dtype(data)
    elif isinstance(data, np.ndarray):  # embedded array: already numpy array
        return data
    else:
        # embedded array: collection item -> convert to np array
        return np.array(data, dtype=input_config["data_type"].type)
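For context, a hypothetical call exercising the "random:" generation path might look like this; the exact structure of input_config, including the data_type wrapper, is inferred from how the fields are accessed above and is an assumption:

import numpy as np

class _DType:
    """Stand-in for the config's data_type object (assumed interface)."""
    def __init__(self, t):
        self.type = t

input_config = {
    "data": "random:0,1",            # generated uniform values in [0, 1)
    "data_type": _DType(np.float64),
    "input_dims": [16, 16],          # non-empty, so the input is not a scalar
}
arr = load_array(input_config, shape=(16, 16))
print(arr.shape, arr.dtype)          # (16, 16) float64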
Example #5
0
File: tests.py Project: eteq/nbodpy
def uniform_ics(nparticles, pscale=1, vscale=1, masses=None):
    """
    Generates `nparticles` particles with uniformly distributed locations
    (centered at the origin with box size `pscale`) and uniform velocities.
    """
    from core import Particles

    pos = pscale * (np.random.rand(3, nparticles) - .5)
    vel = vscale * (np.random.rand(3, nparticles) - .5)

    if masses is None:
        return Particles(pos, vel)
    else:
        return Particles(pos, vel, masses)
Example #7
0
 def __init__(self, num_layers, num_neurons, num_inputs, num_outputs):   #num_layers is the number of hidden layers + 1, num_neurons is the number of neurons in each layer
     self.num_layers = num_layers    #assuming at least two hidden layers are present
     self.num_neurons = num_neurons  #there will be one extra neuron to accommodate the bias term
     self.hidden_weights = np.random.rand(num_layers-2, num_neurons+1, num_neurons+1)  # weights[i][j][k] gives the weight between the jth neuron of the (i-1)th layer and the kth neuron of the ith layer, -1 corresponds to the input
     self.input_weights = np.random.rand(num_inputs + 1, num_neurons) # input_weights[i][j] gives the weight between the ith input neuron and the jth neuron of the first hidden layer
     self.output_weights = np.random.rand(num_neurons+1, num_outputs)
     self.hidden_weights_derivatives = np.zeros((num_layers-2, num_neurons+1, num_neurons+1))  # same layout as hidden_weights
     self.input_weights_derivatives = np.zeros((num_inputs + 1, num_neurons)) # same layout as input_weights
     self.output_weights_derivatives = np.zeros((num_neurons+1, num_outputs))
     self.hidden_neuron_outputs = np.ones((num_neurons+1, num_layers-1))
     self.output_layer_outputs = np.zeros(num_outputs)
     self.hidden_neuron_output_derivatives =  np.zeros((num_neurons+1, num_layers-1)) #zeros used only as a placeholder
     self.output_layer_output_derivatives = np.zeros(num_outputs)
     self.num_inputs = num_inputs
     self.num_outputs = num_outputs
Example #8
0
    def generatePacket(self, N = 0, src = -1, typ = -1):
        if N == 0:
            N = int(round(n.rand()*290)*2) # max length is two 
        
        if src < 0:
            src = int(round(n.rand() * (2**6-1)))

        if typ < 0:
            typ = int(round(n.rand() * (2**2-1)))
        
        id = self.ids[typ, src]
        
        self.ids[typ, src] = self.ids[typ, src] + 1
        
        return DataPacket(src, typ, id, N=N)
Example #9
0
    def train(self, num_batches, batch_size, burn_in_length, sequence_length,
              online_replay_buffer=None, supervised_replay_buffer=None,
              supervised_chance=0.25, writer=None):
        """
        Trains R2D3 style with 2 replay buffers
        """
        assert not online_replay_buffer == supervised_replay_buffer == None

        for batch in range(1, num_batches + 1):
            buff_choice = np.random.rand()
            if(online_replay_buffer is None or buff_choice < supervised_chance):
                replay_buffer = supervised_replay_buffer
            else:
                replay_buffer = online_replay_buffer

            while(not replay_buffer.ready_to_sample(batch_size)):
                pass

            rollouts, idxs, is_weights = replay_buffer.sample(batch_size)

            loss, new_errors = self.train_batch(rollouts, burn_in_length,
                                                sequence_length)
            replay_buffer.update_priorities(new_errors, idxs)

            if(writer is not None):
                if(buff_choice < supervised_chance):
                    writer.add_summary("Supervised Loss", loss, batch)
                else:
                    writer.add_summary("Online Loss", loss, batch)

                writer.add_summary("Loss", loss, batch)
Example #10
0
def _initialization(data, k, useElements, useQuasiRandom):
    if useElements:
        if useQuasiRandom:
            #k-means++
            barycenters = _np.zeros((k, data.shape[1]), dtype=_np.float64)
            lenData = data.shape[0]
            available = _np.ones(lenData, bool)
            for i in range(k):
                if i == 0:
                    weights = _np.ones(lenData, dtype=_np.float64) / lenData
                else:
                    weights = (
                        (barycenters[i - 1, :] - data) ** 2
                    ).sum(1) * available
                    weights /= weights.sum()
                choice = _np.random.choice(lenData, 1, False, weights)
                available[choice] = False
                barycenters[i, :] = data[choice]
        else:
            #default
            barycenters = data[_np.random.choice(data.shape[0], k, False)]
    else:
        minValues, maxValues = data.min(0), data.max(0)
        deltaValues = maxValues - minValues
        if useQuasiRandom:
            raise NotImplementedError('Not Implemented Yet')
        else:
            #random values in value space
            barycenters = (
                _np.random.rand(k, data.shape[1]) * deltaValues
            ) + minValues
    return barycenters
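A small usage sketch of the initializer above, assuming numpy is imported as _np as in the snippet:

import numpy as _np

data = _np.random.rand(200, 2)   # 200 points in 2-D
barycenters = _initialization(data, k=3, useElements=True, useQuasiRandom=True)
print(barycenters.shape)          # (3, 2): one starting barycenter per cluster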
Example #11
0
    def short_sim(self, T=500, batches=100):
        # End time of simulation
        Tend = T * self.Ts
        time = np.linspace(0, Tend, T)  # one time point per input sample

        # input and function to sample input
        u = self.random_bit_stream(T)
        u_interp = interp.interp1d(time, u[:, 0])

        # Construct function for the dynamcis of the system
        # dyn = lambda t, x: self.dynamics(x, u_interp(t))
        x0 = np.zeros((batches, 2 * self.N))

        def dyn(t, x):
            X = x.reshape(2 * self.N, -1)
            dX = self.dynamics(X, u_interp(t)[None])
            dx = dX.reshape(-1)
            return dx

        sol = integrate.solve_ivp(dyn, [0.0, Tend], x0.reshape(-1), t_eval=time)  # solve_ivp expects a 1-D initial state
        t = sol['t']
        x = sol['y']
        u = u.T

        U = u[:, :-1].T
        X = x[:, :-1].T
        Y = x[:, 1:].T + self.v_sd * (np.random.rand(*x[:, 1:].T.shape) - 0.5)

        data = IO_data(U, Y)
        loader = DataLoader(data,
                            batch_size=self.batchsize,
                            shuffle=False,
                            num_workers=1)
        return loader
Example #12
0
def initialize_weight_matrix(n_in,
                             n_out,
                             mag='xavier',
                             base_dist='normal',
                             rng=None):
    """
    Initialize a weight matrix
    :param n_in: Number of input units
    :param n_out: Number of output units
    :param mag: The magnitude, or a string identifying how to calculate the magnitude.
        String options can be:
            'xavier-forward' - Best for preserving variance of a linear, tanh, or sigmoidal network across layers.
            'xavier-both': - A compromise between preserving the variance of the forward and backward passes
            'xavier-relu': - Best for preserving variance on the forward pass in a ReLU net.
    :param base_dist: 'normal' or 'uniform', or a function taking (n_in, n_out) and returning a (n_in, n_out) array
    :param rng: Random number generator or seed
    :return: A shape (n_in, n_out) initial weight matrix.
    """
    rng = get_rng(rng)

    w_base = rng.randn(n_in, n_out) if base_dist == 'normal' else \
        (rng.rand(n_in, n_out) - 0.5)*np.sqrt(12) if base_dist=='uniform' else \
        bad_value(base_dist)

    mag_number = \
        np.sqrt(2./(n_in+n_out)) if mag in ('xavier', 'xavier-both') else \
        np.sqrt(1./n_in) if mag=='xavier-forward' else \
        np.sqrt(2./n_in) if mag=='xavier-relu' else \
        mag if isinstance(mag, numbers.Real) else \
        bad_value(mag)

    return w_base * mag_number
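A quick standalone check of the 'xavier' scaling described in the docstring; this is my illustration, not part of the original module, and it does not rely on the get_rng or bad_value helpers:

import numpy as np

rng = np.random.default_rng(0)
n_in, n_out = 256, 128

# Xavier magnitude for preserving variance: sqrt(2 / (n_in + n_out))
W = rng.standard_normal((n_in, n_out)) * np.sqrt(2.0 / (n_in + n_out))

x = rng.standard_normal((1000, n_in))   # unit-variance inputs
y = x @ W
# var(y) is roughly 2 * n_in / (n_in + n_out) times var(x) (about 1.33 here)
print(np.var(x), np.var(y))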
Example #13
0
def mi(x, y, k=3, base=2):
    """ Mutual information of x and y
        x, y should be a list of vectors, e.g. x = [[1.3], [3.7], [5.1], [2.4]]
        if x is a one-dimensional scalar and we have four samples
    """
    assert len(x) == len(y), "Lists should have same length"
    assert k <= len(x) - 1, "Set k smaller than num. samples - 1"
    intens = 1e-10  # small noise to break degeneracy, see doc.
    x = [list(p + intens * np.random.rand(len(x[0]))) for p in x]
    y = [list(p + intens * np.random.rand(len(y[0]))) for p in y]
    points = zip2(x, y)
    # Find nearest neighbors in joint space, p=inf means max-norm
    tree = ss.cKDTree(points)
    dvec = [tree.query(point, k + 1, p=float('inf'))[0][k] for point in points]
    a, b, c, d = avgdigamma(x, dvec), avgdigamma(y, dvec), digamma(k), digamma(
        len(x))
    return (-a - b + c + d) / log(base)
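A minimal usage sketch, assuming the helpers this estimator relies on (zip2, avgdigamma, digamma, log and scipy.spatial's cKDTree imported as ss) are available in the same module:

import numpy as np

rng = np.random.RandomState(0)
a = rng.randn(500)
b = a + 0.5 * rng.randn(500)      # strongly correlated with a

x = [[v] for v in a]              # list of 1-D "vectors", as the docstring requires
y = [[v] for v in b]
print(mi(x, y, k=3, base=2))      # estimated mutual information in bits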
Example #14
0
 def jitMCQ(self,x,y,jsig, mcp):
     from PYME.Analysis.QuadTree import pointQT
     Imc = numpy.random.rand(len(x)) < mcp
     qt = pointQT.qtRoot(-250,250, 0, 500)
     if type(jsig) == numpy.ndarray:
         jsig = jsig[Imc]
     for xi, yi in zip(x[Imc] +  jsig*numpy.random.normal(size=Imc.sum()), y[Imc] +  jsig*numpy.random.normal(size=Imc.sum())):
         qt.insert(pointQT.qtRec(xi, yi, None))
     self.setQuads(qt, 100, True)
Example #15
0
 def jitMCQ(self,x,y,jsig, mcp):
     from PYME.Analysis.points.QuadTree import pointQT
     Imc = numpy.random.rand(len(x)) < mcp
     qt = pointQT.qtRoot(-250,250, 0, 500)
     if type(jsig) == numpy.ndarray:
         jsig = jsig[Imc]
     for xi, yi in zip(x[Imc] +  jsig*numpy.random.normal(size=Imc.sum()), y[Imc] +  jsig*numpy.random.normal(size=Imc.sum())):
         qt.insert(pointQT.qtRec(xi, yi, None))
     self.setQuads(qt, 100, True)
Example #16
0
    def __init__(self, input_size, output_size, hidden_size=64):

        # init weights
        self.W_f = np.random.randn(input_size + hidden_size,
                        hidden_size) / 1000  # forget gate weights
        self.W_i = np.random.randn(input_size + hidden_size,
                        hidden_size) / 1000  # input gate weights
        self.W_c = np.random.randn(input_size + hidden_size,
                        hidden_size) / 1000  # candidate gate weights
        self.W_o = np.random.rand(input_size + hidden_size,
                       hidden_size) / 1000  # output gate weights

        self.W_y = np.random.rand(input_size + hidden_size, hidden_size) / 1000

        self.b_f = np.zeros((hidden_size, 1))  # forget gate bias
        self.b_i = np.zeros((hidden_size, 1))  # input gate bias
        self.b_c = np.zeros((hidden_size, 1))  # candidate gate bias
        self.b_o = np.zeros((hidden_size, 1))  # output gate
        self.b_y = np.zeros((hidden_size, 1))  # y bias
Example #17
0
def rnd_draw(p):
    """
    To draw a sample using a probability distribution.
    """
    a = [0]
    a = np.append(a, np.cumsum(p[0:-1])) / np.sum(p)
    b = np.cumsum(p) / np.sum(p)
    toss = np.random.rand()
    k = np.intersect1d(np.nonzero(a < toss)[0], np.nonzero(b >= toss)[0])
    return k
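For reference, NumPy can make the same weighted draw directly; this one-liner is an alternative, not part of the original function:

import numpy as np

p = np.array([0.1, 0.2, 0.3, 0.4])
k = np.random.choice(len(p), p=p / np.sum(p))   # index drawn with probability p[k]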
Example #18
0
	def __init__(self, inputs, outputs, nTrainingExamples, stepSize =  0.1, nLayers = 3, regularizationParameter = 5, crossValidation = False, learningRate =1e-4):
		self.Xs = inputs
		self.Ys = outputs
		self.m = nTrainingExamples
		self.n = stepSize
		self.l = nLayers
		self.Lambda = regularizationParameter
		self.t = len(self.Xs[0]) + 1
		self.theta = np.random.rand(self.t * self.t * self.l).reshape(self.l ,self.t, self.t) * 2 - 1 # Range [-1,1]
		self.nodesInInput = len(self.Xs[0]) + 1
		self.nodesInOutput = len(self.Ys[0])
		self.crossValidation = crossValidation
		self.learningRate = learningRate
Example #19
0
    def parse_param(self, n, maxiter=1e4, tol=1e-8, xtrue=[], randinit=False, x0=[]):
        niter = int(maxiter)
        xcur = np.zeros(n)     # this allows us to keep v = 1/n
        xcur = xcur + self.v
        if randinit:
            xcur = np.random.rand(n)        # keep xcur 1-D, consistent with the other branches
            xcur = xcur / np.sum(xcur)
        if x0:
            xcur = np.zeros(n) + x0

        hist = np.zeros((niter, 1))
        ihist = np.zeros((n, niter))

        return niter, xcur, hist, ihist
Example #20
0
def visualize_protein_markers_tsne(vlm,
                                   protein_markers,
                                   pc_targets,
                                   visualize_clusters=False,
                                   colormap='inferno'):
    """
    Visualizes an array of protein markers in protein principal component space. The components to plot are given by
    the list pc_targets. If visualize_clusters is selected, an additional cluster-colored plot is generated.
    Useful for iterative manual procedure to identify clusters based on characteristic markers.
    """
    array_proteins = vlm.adt_names
    pcs = vlm.prot_tsne
    pc_zi = [pc_targets[0] - 1, pc_targets[1] - 1]

    n_addit = int(visualize_clusters)

    nrows = int(np.ceil((len(protein_markers) + n_addit) / 5))
    #     print(nrows)

    f, ax = plt.subplots(nrows=nrows, ncols=5, figsize=(12, 0.25 + 2 * nrows))
    ax = ax.flatten()

    for j in range(len(protein_markers)):
        prot_name = protein_markers[j]
        sc = ax[j].scatter(pcs[:, pc_zi[0]],
                           pcs[:, pc_zi[1]],
                           s=3,
                           c=np.log(vlm.P[array_proteins == prot_name][0] + 1),
                           alpha=0.2,
                           cmap=colormap)
        plt.colorbar(sc)
        ax[j].set_title(prot_name)

    if visualize_clusters:

        if hasattr(vlm, 'cluster_ID') and hasattr(vlm, 'COLORS'):
            col = vlm.COLORS[vlm.cluster_ID]
        else:
            COLORS = np.random.rand(np.amax(vlm.cluster_ID) + 1, 3)
            col = COLORS[vlm.cluster_ID]

        ax[-1].scatter(pcs[:, pc_zi[0]],
                       pcs[:, pc_zi[1]],
                       s=3,
                       c=col,
                       alpha=0.9)

    for k in range(len(ax)):
        ax[k].axis('off')
Example #21
0
def init_params(options):
    # global (non-LSTM) parameters: for the embedding and the classifier
    params = OrderedDict()
    # embedding
    randn = np.random.rand(options['n_words'], options['dim_proj'])
    params['Wemb'] = (0.01 * randn).astype(config.floatX)
    params = get_layer(options['encoder'])[0](options,
                                              params,
                                              prefix=options['encoder'])
    # initialize the classifier parameters
    params['U'] = 0.01 * numpy.random.randn(
        options['dim_proj'], options['ydim']).astype(config.floatX)
    params['b'] = np.zeros((options['ydim'], )).astype(config.floatX)

    return params
Example #22
0
 def forward(self,source,target,teacher_force_ratio=0.5):
     batch_size=source.shape[1]
     target_len=target.shape[0]
     target_vocab_size=len(english.vocab)
     #here we will save the outputs
     outputs=torch.zeros(target_len,batch_size,target_vocab_size).to(device)
     hidden,cell=self.encoder(source)
     #this is the start token
     x=target[0]
     for i in range(1,len(target)):
         output,(hidden,cell)=self.decoder(x,hidden,cell)
         outputs[i]=output
         best_guess=output.argmax(1)
         random=np.random.rand()
         x=target[i] if random > teacher_force_ratio else best_guess
     return outputs
Example #23
0
    def __init__(self, *args, **kwargs):
        kwargs["style"] = wx.DEFAULT_FRAME_STYLE
        wx.Frame.__init__(self, *args, **kwargs)
        self.panel_1 = wx.ScrolledWindow(self, -1, style=wx.TAB_TRAVERSAL)

        self.ns = range(40)
        self.ts = range(0, 200, 20)
        self.bitmaps = {}
        for ni, n in enumerate(self.ns):
            self.bitmaps[n] = {}
            for ti, t in enumerate(self.ts):
                a = np.random.rand(32*3, 32)*255
                a = a.round().astype(np.uint8)
                im = wx.ImageFromData(32, 32, a.data)
                im = im.Scale(64, 64)
                self.bitmaps[n][t] = wx.StaticBitmap(parent=self.panel_1, bitmap=im.ConvertToBitmap())

        #self.Bind(wx.EVT_PAINT, self.OnPaint)
        self.__set_properties()
        self.__do_layout()
Example #25
0
    def post(self, request, endpoint_name, format=None):
        algorithm_status = self.request.query_params.get(
            "status", "production")
        algorithm_version = self.request.query_params.get("version")

        algs = MLAlgorithm.objects.filter(parent_endpoint__name=endpoint_name,
                                          status__status=algorithm_status,
                                          status__active=True)

        if algorithm_version is not None:
            algs = algs.filter(version=algorithm_version)

        if len(algs) == 0:
            return Response(
                {
                    "status":
                    'Error',
                    'message':
                    'ML algorithm selection is ambiguous. Please specify algorithm version'
                },
                status=status.HTTP_400_BAD_REQUEST)
        alg_index = 0
        if algorithm_status == 'ab_testing':
            alg_index = 0 if np.random.rand() < 0.5 else 1

        algorithm_object = registry.endpoints[algs[alg_index].id]
        prediction = algorithm_object.compute_prediction(request.data)

        label = prediction['label'] if 'label' in prediction else 'error'
        ml_request = MLRequest(input_data=json.dumps(request.data),
                               full_response=prediction,
                               response=label,
                               feedback='',
                               parent_mlalgorithm=algs[alg_index])
        ml_request.save()

        prediction['request_id'] = ml_request.id

        return Response(prediction)
Example #26
0
 def inititalize_stickbreaking_Z(customers, alpha=10, reducedprop=1.):
     """ Simple implementation of the Indian Buffet Process. Generates a binary matrix with
     customers rows and an expected number of columns of alpha * sum(1,1/2,...,1/customers).
     This implementation uses a stick-breaking construction.
     An additional parameter permits reducing the expected number of times a dish is tried. """
     # max number of dishes is distributed according to Poisson(alpha*sum(1/i))
     _lambda = alpha * np.sum(1. / np.array(list(range(1, customers + 1))))
     alpha /= reducedprop
     # we give it 2 standard deviations as cutoff
     maxdishes = int(_lambda + np.sqrt(_lambda) * 2) + 1
     res = np.zeros((customers, maxdishes), dtype=bool)
     stickprops = np.random.beta(alpha, 1, maxdishes) # nu_i
     currentstick = 1.
     dishesskipped = 0
     for i, nu in enumerate(stickprops):
         currentstick *= nu
         dishestaken = np.random.rand(customers) < currentstick * reducedprop
         if np.sum(dishestaken) > 0:
             res[:, i - dishesskipped] = dishestaken
         else:
             dishesskipped += 1
     return res[:, :maxdishes - dishesskipped]
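A quick usage sketch (standalone; the seed is only for reproducibility and is my addition):

import numpy as np

np.random.seed(0)
Z = inititalize_stickbreaking_Z(customers=20, alpha=5)
print(Z.shape)        # (20, number of dishes actually kept)
print(Z.sum(axis=0))  # how many customers tried each dish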
Example #27
0
def writePacket(fid, packet):
    """ writes the packet at a random location inside the event cycle
    and fills the remaining cycles with empty ('0 00') entries
    """

    indata = packet.getInputData()
    
    N = len(indata) * 2
    M = 980
    startrange = M - N
    p = int(n.rand()*startrange)

    s = 1000 - N - p
    
    for i in range(p):
        fid.write("0 00\n")

    for i in indata:
        fid.write("1 %2.2X\n" % ((i >> 8) & 0xFF))
        fid.write("1 %2.2X\n" % (i & 0xFF))

    for i in range(s):
        fid.write("0 00\n")
Example #28
0
def writePacket(fid, packet):
    """ writes the packet at a random location inside the event cycle
    and fills the remaining cycles with empty ('0 00') entries.

    We use an offset of 4 to strip off the calculated ids.
    
    """
    os = 4
    
    N = len(packet)-os
    M = 980
    startrange = M - N
    p = int(n.rand()*startrange)

    s = 1000 - N - p
    
    for i in range(p):
        fid.write("0 00\n")

    for i in packet[os:]:
        fid.write("1 %2.2X\n" % i)

    for i in range(s):
        fid.write("0 00\n")
Example #29
0
    # kernel = np.ones((3, 3))
    # kernel[1, 1] = 0
    next_img = np.zeros_like(img)

    neighb = convolve2d(
        img,
        kernel,
        mode='same',
    )

    next_img[:] = (~((neighb < 3) & (img == 1)))


# ==================================================================================================

input_data = [np.random.rand(1, 1, 10, 10) for _ in range(10000)]
target_data = [game(x) for x in input_data]

from torch.utils.data import Dataset, DataLoader
from torch import optim


def generate_data(img):
    return img


class GameOfLifeData(Dataset):
    """Face Landmarks dataset."""
    def __init__(self, ):
        """
        Args:
Example #30
0
from numpy import zeros, array, linspace, random, ones, arange, sum
from numpy import float64 as Float   # the old Numeric-style "Float" name
from numpy.random import rand
from _ppssampler import set_rng_state, get_rng_state, local_irand, local_rand
from _ppssampler import equalprob, equalprobi
from population import *
from messages import restart, elapsedcpu

# Check passing RandomKit state back and forth with numpy.
print '*** Checking RandomKit state maintenance ***'
state0 = random.get_state()
id, key, pos = state0
set_rng_state(id, key, pos)
print 'Should be same:', rand(), local_rand()
for i in range(100):
    local_rand()
print 'Should be dif: ', rand(), local_rand()
random.set_state(get_rng_state())
print 'Should be same:', rand(), local_rand()
print

# Check equal-weight samplers.
print '*** Checking equiprobability samplers ***'
pool = zeros(10)  # Workspace
samp = equalprobi(5,10,pool)
print 'equalprobi:', samp

# Check equalprob with a list input.
pool = range(10,20)
samp = equalprob(5,pool)
print 'equalprob:', samp
# Check the original pool is unchanged.
Example #31
0
File: ops.py Project: comadan/nn
def rand(shape, rg=[0, 1]):
    if USE_GPU:
        return gnp.rand(shape) * (rg[1] - rg[0]) + rg[0]
    else:
        return gnp.random.rand(*shape) * (rg[1] - rg[0]) + rg[0]
        d = D[:,j]
        alpha_update = alpha[j] + np.dot(d, x - np.dot(D, alpha)) / sqnorm(d)
        alpha[j] = soft_threshold(alpha_update, lmbda)

        obj_values.append(sqnorm(x - np.dot(D,alpha)))
        
    return alpha, obj_values
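The coordinate-descent fragment above calls two helpers that are not shown; plausible definitions consistent with how they are used (these are assumptions, not the original code):

import numpy as np

def sqnorm(v):
    # squared Euclidean norm
    return float(np.dot(v, v))

def soft_threshold(z, lmbda):
    # soft-thresholding operator used in the lasso coordinate update
    return np.sign(z) * max(abs(z) - lmbda, 0.0)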
    


# In[42]:

m = 30                     # dimension of each atom
p = 100                      # number of atoms
rand = np.random.rand
D = rand(m*p).reshape(m,p) # dictionary
x = rand(m)

# normalise by column
D = D / np.linalg.norm(D,axis=0)
x = x / np.linalg.norm(x)

alpha, obj_values = coordinate_descent(D, x, 0.01)
s = "sparsity: {}".format(len(np.nonzero(alpha)[0]))
plt.subplot(3, 1, 1)
plt.title(s)
plt.plot(np.array(obj_values))

alpha, obj_values = coordinate_descent(D, x, 0.5)
s2 = "sparsity: {}".format(len(np.nonzero(alpha)[0]))
plt.subplot(3, 1, 2)
Example #33
0
def plot_subfaults(subfaults, plot_centerline=False, slip_color=False, \
            cmap_slip=None, cmin_slip=None, cmax_slip=None, \
            plot_rake=False, xylim=None, plot_box=True):

    """
    Plot each subfault projected onto the surface.
    Describe parameters...
    """

    import matplotlib
    import matplotlib.pyplot as plt

    #figure(44,(6,12)) # For CSZe01
    #clf()

    # for testing purposes, make random slips:
    test_random = False


    max_slip = 0.
    min_slip = 0.
    for subfault in subfaults:
        if test_random:
            subfault['slip'] = 10.*numpy.random.rand()  # for testing
            #subfault['slip'] = 8.  # uniform
        slip = subfault['slip']
        max_slip = max(abs(slip), max_slip)
        min_slip = min(abs(slip), min_slip)
    print "Max slip, Min slip: ",max_slip, min_slip

    if slip_color:
        if cmap_slip is None:
            cmap_slip = matplotlib.cm.jet
            #white_purple = colormaps.make_colormap({0.:'w', 1.:[.6,0.2,.6]})
            #cmap_slip = white_purple
        if cmax_slip is None:
            cmax_slip = max_slip
        if cmin_slip is None:
            cmin_slip = 0.
        if test_random:
            print "*** test_random == True so slip and rake have been randomized"
        
    y_ave = 0.
    for subfault in subfaults:

        set_fault_xy(subfault)

        # unpack parameters:
        paramlist = """x_top y_top x_bottom y_bottom x_centroid y_centroid
            depth_top depth_bottom depth_centroid x_corners y_corners""".split()

        for param in paramlist:
            cmd = "%s = subfault['%s']" % (param,param)
            exec(cmd)

        y_ave += y_centroid


        # Plot projection of planes to x-y surface:
        if plot_centerline:
            plt.plot([x_top],[y_top],'bo',label="Top center")
            plt.plot([x_centroid],[y_centroid],'ro',label="Centroid")
            plt.plot([x_top,x_centroid],[y_top,y_centroid],'r-')
        if plot_rake:
            if test_random:
                subfault['rake'] = 90. + 30.*(rand()-0.5)  # for testing
            tau = (subfault['rake'] - 90) * numpy.pi/180.
            plt.plot([x_centroid],[y_centroid],'go',markersize=5,label="Centroid")
            dxr = x_top - x_centroid
            dyr = y_top - y_centroid
            x_rake = x_centroid + numpy.cos(tau)*dxr - numpy.sin(tau)*dyr
            y_rake = y_centroid + numpy.sin(tau)*dxr + numpy.cos(tau)*dyr
            plt.plot([x_rake,x_centroid],[y_rake,y_centroid],'g-',linewidth=1)
        if slip_color:
            slip = subfault['slip']
            #c = cmap_slip(0.5*(cmax_slip + slip)/cmax_slip)
            #c = cmap_slip(slip/cmax_slip)
            s = min(1, max(0, (slip-cmin_slip)/(cmax_slip-cmin_slip)))
            c = cmap_slip(s*.99)  # since 1 does not map properly with jet
            plt.fill(x_corners,y_corners,color=c,edgecolor='none')
        if plot_box:
            plt.plot(x_corners, y_corners, 'k-')

    slipax = plt.gca()
        
    y_ave = y_ave / len(subfaults)
    slipax.set_aspect(1./numpy.cos(y_ave*numpy.pi/180.))
    plt.ticklabel_format(format='plain',useOffset=False)
    plt.xticks(rotation=80)
    if xylim is not None:
        plt.axis(xylim)
    plt.title('Fault planes')
    if slip_color:
        cax,kw = matplotlib.colorbar.make_axes(slipax)
        norm = matplotlib.colors.Normalize(vmin=cmin_slip,vmax=cmax_slip)
        cb1 = matplotlib.colorbar.ColorbarBase(cax, cmap=cmap_slip, norm=norm)
        #import pdb; pdb.set_trace()
    plt.sca(slipax) # reset the current axis to the main figure
Example #34
0
    if a.ndim != 2:
        raise ValueError("a must be 2-d")
    code = r"""
    int i,j;
    for(i=1;i<Na[0]-1;i++) {
        for(j=1;j<Na[1]-1;j++) {
            B2(i,j) = A2(i,j) + A2(i-1,j)*0.5 +
                      A2(i+1,j)*0.5 + A2(i,j-1)*0.5
                      + A2(i,j+1)*0.5
                      + A2(i-1,j-1)*0.25
                      + A2(i-1,j+1)*0.25
                      + A2(i+1,j-1)*0.25
                      + A2(i+1,j+1)*0.25;
        }
    }
    """
    b = zeros_like(a)
    weave.inline(code,['a','b'])
    return b

a = [None]*10
print example1(a)
print a

a = rand(512,512)
b = arr(a)

h = [[0.25,0.5,0.25],[0.5,1,0.5],[0.25,0.5,0.25]]
import scipy.signal as ss
b2 = ss.convolve(h,a,'same')
Example #35
0
import numpy as np
import scipy.special

q_1 = np.zeros(8)
q_2 = np.ones(7)
q_3 = 5*np.ones(6)

r_1 = np.arange(6)
r_2 = np.arange(0,6,0.5)
r_3 = np.arange(5,-1,-1)

s_1 = []

t_1 = np.linspace(0,5,90)
t_2 = np.linspace(5,0,80)

u_1 = np.logspace(-2,2,9)
u_2 = np.log10(u_1)

v_1 = np.exp(np.arange(-2,4))
v_2 = np.log(v_1)

w_1 = 2**np.arange(0,11)
w_2 = 1 / 2**np.arange(0,6)

x_1 = scipy.special.factorial(np.arange(0,7))

y_1 = np.random.rand(10)
y_2 = np.random.randn(10)
y_3 = np.random.randint(5,15, size = 10)
Example #36
0
def demo5 (*itest) :
    """demo5 () or demo5 (i)
      Run examples of use of pl3d.i, plwf.i, and slice3.i.  With
      argument I = 1, 2, or 3, run that particular demonstration.
      Read the source code to understand the details of how the
      various effects are obtained.

      demo5 (1) demonstrates the various effects which can be obtained
      with the plwf (plot wire frame) function.
      demo5 (2) demonstrates shading effects controlled by the light3
      function
      demo5 (3) demonstrates the slice3, slice2, and pl3tree functions,
      as well as changing the orientation of the 3D object
    """
    global making_movie
    if len (itest) == 0 or itest [0] == 1 :
        set_draw3_ (0)
        x = span (-1, 1, 64, 64)
        y = transpose (x)
        z = (x + y) * exp (-6.*(x*x+y*y))
        limits_(square = 1)
        print  "(plot wire frame) plwf,z,y,x"
        orient3 ( )
        light3 ( )
        plwf (z, y, x)
        [xmin, xmax, ymin, ymax] = draw3(1) # not necessary interactively
        limits (xmin, xmax, ymin, ymax)
        plt("opaque wire mesh", .30, .42)
        paws ( )
        print "plwf,z,y,x, shade=1,ecolor=\"red\""
        plwf(z,y,x,shade=1,ecolor="red")
        [xmin, xmax, ymin, ymax] = draw3(1) # not necessary interactively
        limits (xmin, xmax, ymin, ymax)
        paws()
        print "plwf,z,y,x, shade=1,edges=0"
        plwf(z,y,x,shade=1,edges=0)
        [xmin, xmax, ymin, ymax] = draw3(1) # not necessary interactively
        limits (xmin, xmax, ymin, ymax)
        paws ( )
        light3 ( diffuse=.1, specular=1., sdir=array([0,0,-1]))
        [xmin, xmax, ymin, ymax] = draw3(1)
        limits (xmin, xmax, ymin, ymax)
        paws ( )
        light3 ( diffuse=.5, specular=1., sdir=array([1,.5,1]))
        [xmin, xmax, ymin, ymax] = draw3 (1)
        limits (xmin, xmax, ymin, ymax)
        paws ( )
        light3 ( ambient=.1,diffuse=.1,specular=1.,
               sdir=array([[0,0,-1],[1,.5,1]]),spower=array([4,2]))
        [xmin, xmax, ymin, ymax] = draw3(1)
        limits (xmin, xmax, ymin, ymax)
        paws ( )
    if len (itest) == 0 or itest [0] == 2 :
        set_draw3_ (0)
        x = span (-1, 1, 64, 64)
        y = transpose (x)
        z = (x + y) * exp (-6.*(x*x+y*y))
        print "light3 function demo- default lighting"
        orient3 ( )
        light3 ( )
        plwf (z,y,x,shade=1,edges=0)
        [xmin, xmax, ymin, ymax] = draw3 (1) # not necessary interactively
        limits (xmin, xmax, ymin, ymax)
        paws( )
        print "light3,diffuse=.2,specular=1"
        light3(diffuse=.2,specular=1)
        limits_(square = 1)
        [xmin, xmax, ymin, ymax] = draw3(1) # not necessary interactively
        limits (xmin, xmax, ymin, ymax)
        paws()
        print "light3,sdir=[cos(theta),.25,sin(theta)]  -- movie"
        making_movie = 1
        movie(demo5_light, lims = [xmin, xmax, ymin, ymax])
        making_movie = 0
        fma()
        demo5_light(1)
        paws()
        light3()
    if len (itest) == 0 or itest [0] == 3 :
        nx = demo5_n [0]
        ny = demo5_n [1]
        nz = demo5_n [2]
        xyz = zeros ( (3, nx, ny, nz), Float)
        xyz [0] = multiply.outer ( span (-1, 1, nx), ones ( (ny, nz), Float))
        xyz [1] = multiply.outer ( ones (nx, Float),
           multiply.outer ( span (-1, 1, ny), ones (nz, Float)))
        xyz [2] = multiply.outer ( ones ( (nx, ny), Float), span (-1, 1, nz))
        r = sqrt (xyz [0] ** 2 + xyz [1] **2 + xyz [2] **2)
        theta = arccos (xyz [2] / r)
        phi = arctan2 (xyz [1] , xyz [0] + logical_not (r))
        y32 = sin (theta) ** 2 * cos (theta) * cos (2 * phi)
        m3 = mesh3 (xyz, funcs = [r * (1. + y32)])
        del r, theta, phi, xyz, y32

        print "   test uses " + `(nx - 1) * (ny - 1) * (nz - 1)` + " cells"
        elapsed = [0., 0., 0.]
        elapsed = timer_ (elapsed)
        elapsed0 = elapsed

        [nv, xyzv, dum] = slice3 (m3, 1, None, None, value = .50)
            # (inner isosurface)
        [nw, xyzw, dum] = slice3 (m3, 1, None, None, value = 1.)
            # (outer isosurface)
        pxy = plane3 ( array ([0, 0, 1], Float ), zeros (3, Float))
        pyz = plane3 ( array ([1, 0, 0], Float ), zeros (3, Float))
        [np, xyzp, vp] = slice3 (m3, pyz, None, None, 1)
            # (pseudo-colored slice)
        [np, xyzp, vp] = slice2 (pxy, np, xyzp, vp)
            # (cut slice in half)
        [nv, xyzv, d1, nvb, xyzvb, d2] = \
            slice2x (pxy, nv, xyzv, None)
        [nv, xyzv, d1] = \
            slice2 (- pyz, nv, xyzv, None)
            # (...halve one of those halves)
        [nw, xyzw, d1, nwb, xyzwb, d2] = \
            slice2x ( pxy , nw, xyzw, None)
            # (split outer in halves)
        [nw, xyzw, d1] = \
            slice2 (- pyz, nw, xyzw, None)

        elapsed = timer_ (elapsed)
        timer_print ("slicing time", elapsed - elapsed0)

        fma ()
        print "split_palette,\"earth.gp\" -- generate palette for pl3tree"
        split_palette ("earth.gp")
        print "gnomon -- turn on gnomon"
        gnomon (1)

        print "pl3tree with 1 slicing plane, 2 isosurfaces"
        clear3 ()
        # Make sure we don't draw till ready
        set_draw3_ (0)
        pl3tree (np, xyzp, vp, pyz)
        pl3tree (nvb, xyzvb)
        pl3tree (nwb, xyzwb)
        pl3tree (nv, xyzv)
        pl3tree (nw, xyzw)
        orient3 ()
        light3 (diffuse = .2, specular = 1)
        limits ()
        limits (square=1)
        demo5_light (1)
        paws ()
        hcp ()

        print "spin3 animated rotation, use rot3 or orient3 for one frame"
        # don't want limits to autoscale during animation
        lims = limits ( )
        spin3 ()
        limits ( ) # back to autoscaling
        demo5_light (1)
        paws ()

        light3 ()
        gnomon (0)
        limits (square = 1)
        palette ("gray.gp")

    if len (itest) == 0 or itest [0] == 4 :
        f = PR ('./bills_plot')
        n_nodes = f.NumNodes
        n_z = f.NodesOnZones
        x = f.XNodeCoords
        y = f.YNodeCoords
        z = f.ZNodeCoords
        c = f.ZNodeVelocity
        n_zones = f.NumZones
        # Put vertices in right order for Gist
        n_z = transpose (
           take (transpose (n_z), array ( [0, 4, 3, 7, 1, 5, 2, 6]),axis=0))
        m3 = mesh3 (x, y, z, funcs = [c], verts = n_z ) # [0:10])
        [nv, xyzv, cv] = slice3 (m3, 1, None, None, 1, value = .9 * max (c) )
        pyz = plane3 ( array ([1, 0, 0], Float ), zeros (3, Float))
        pxz = plane3 ( array ([0, 1, 0], Float ), zeros (3, Float))

        # draw a colored plane first
        fma ()
        clear3 ()
        # Make sure we don't draw till ready
        set_draw3_ (0)
        [np, xyzp, vp] = slice3 (m3, pyz, None, None, 1)
        pl3tree (np, xyzp, vp, pyz, split = 0)
        palette ("rainbow.gp")
        orient3 ()
        demo5_light (1)
        paws ()


#     [nv, xyzv, d1] = \
#         slice2 (- pyz, nv, xyzv, None)
        [nw, xyzw, cw] = slice3 (m3, 1, None, None, 1, value = .9 * min (c) )
#     [nw, xyzw, d1] = \
#         slice2 (- pyz, nw, xyzw, None)
        [nvi, xyzvi, cvi] = slice3 (m3, 1, None, None, 1, value = .5 * min (c) )
        [nvi, xyzvi, cvi] = \
            slice2 (- pyz, nvi, xyzvi, cvi)
        [nvj, xyzvj, cvj] = slice3 (m3, 1, None, None, 1, value = .5 * max (c) )
        [nvj, xyzvj, cvj] = \
            slice2 (- pyz, nvj, xyzvj, cvj)

        fma ()
        print "gnomon -- turn on gnomon"
        gnomon (1)
        clear3 ()
        # Make sure we don't draw till ready
        set_draw3_ (0)
        pl3tree (nv, xyzv) # , cv)
        pl3tree (nw, xyzw) # , cw)
        pl3tree (nvi, xyzvi) # , cvi)
        pl3tree (nvj, xyzvj) # , cvi)
        orient3 ()
        light3 (ambient = 0, diffuse = .5, specular = 1, sdir = [0, 0, -1])
        limits (square=1)
        palette ("gray.gp")
        demo5_light (1)
        paws ()

        print "spin3 animated rotation, use rot3 or orient3 for one frame"
        # don't want limits to autoscale during animation
        spin3 ()
        limits ( ) # back to autoscaling
        demo5_light (1)
        paws ()

        light3 ()
        gnomon (0)
        palette ("gray.gp")

        draw3 ( 1 )
        paws ()
        clear3 ()
        del nv, xyzv, cv, nw, xyzw, cw, nvi, xyzvi, cvi, nvj, xyzvj, cvj
        # Make sure we don't draw till ready
        set_draw3_ (0)
        for i in range (8) :
            [nv, xyzv, cv] = slice3 (m3, 1, None, None, 1, value = .9 * min (c) +
                i * (.9 * max (c) - .9 * min (c)) / 8.)
            [nv, xyzv, d1] = \
                slice2 (pxz, nv, xyzv, None)
            pl3tree (nv, xyzv)
        orient3 ()
        light3 (ambient = 0, diffuse = .5, specular = 1, sdir = [0, 0, -1])
        limits (square=1)
        palette ("heat.gp")
        demo5_light (1)
        paws ()
        spin3 ()
        limits ( ) # back to autoscaling
        demo5_light (1)
        paws ()
        demo5_light (1)
        paws ()

    if len (itest) == 0 or itest [0] == 5 :
        # Try bert's data
        f = PR ('./berts_plot')
        nums = array ( [63, 63, 49], Int)
        dxs = array ( [2.5, 2.5, 10.], Float )
        x0s = array ( [-80., -80., 0.0], Float )
        c = f.c

        m3 = mesh3 (nums, dxs, x0s, funcs = [transpose (c)])
        [nv, xyzv, dum] = slice3 (m3, 1, None, None, value = 6.5)
        fma ()
        clear3 ()
        print "gnomon -- turn on gnomon"
        gnomon (1)
        # Make sure we don't draw till ready
        set_draw3_ (0)
        palette ("rainbow.gp")
        pl3tree (nv, xyzv)
        orient3 ()
        light3 (diffuse = .2, specular = 1)
        limits (square=1)
        demo5_light (1)
        paws ()
        spin3 ()
        demo5_light (1)
        paws ()
    if len (itest) == 0 or itest [0] == 6 :
        # Try Bill's irregular mesh
        f = PR ("ball.s0001")
        ZLss = f.ZLstruct_shapesize
        ZLsc = f.ZLstruct_shapecnt
        ZLsn = f.ZLstruct_nodelist
        x = f.sap_mesh_coord0
        y = f.sap_mesh_coord1
        z = f.sap_mesh_coord2
        c = f.W_vel_data
        # Now we need to convert this information to avs-style data
        istart = 0 # beginning index into ZLstruct_nodelist
        NodeError = "NodeError"
        ntet = 0
        nhex = 0
        npyr = 0
        nprism = 0
        nz_tet = []
        nz_hex = []
        nz_pyr = []
        nz_prism = []
        for i in range (4) :
            if ZLss [i] == 4 : # TETRAHEDRON
                nz_tet = reshape (ZLsn [istart: istart + ZLss [i] * ZLsc [i]],
                         (ZLsc [i], ZLss [i]))
                ntet = ZLsc [i]
                istart = istart + ZLss [i] * ZLsc [i]
            elif ZLss[i] == 5 : # PYRAMID
                nz_pyr = reshape (ZLsn [istart: istart + ZLss [i] * ZLsc [i]],
                         (ZLsc [i], ZLss [i]))
                npyr = ZLsc [i]
                # Now reorder the points (bill has the apex last instead of first)
                nz_pyr = transpose (
                   take (transpose (nz_pyr), array ( [4, 0, 1, 2, 3]),axis=0))
                istart = istart + ZLss [i] * ZLsc [i]
            elif ZLss[i] == 6 : # PRISM
                nz_prism = reshape (ZLsn [istart: istart + ZLss [i] * ZLsc [i]],
                         (ZLsc [i], ZLss [i]))
                nprism = ZLsc [i]
                # now reorder the points (bill goes around a square face
                # instead of traversing the opposite sides in the same direction.
                nz_prism = transpose (
                   take (transpose (nz_prism), array ( [0, 1, 3, 2, 4, 5]),axis=0))
                istart = istart + ZLss [i] * ZLsc [i]
            elif ZLss[i] == 8 : # HEXAHEDRON
                nz_hex = reshape (ZLsn [istart: istart + ZLss [i] * ZLsc [i]],
                         (ZLsc [i], ZLss [i]))
                # now reorder the points (bill goes around a square face
                # instead of traversing the opposite sides in the same direction.
                nz_hex = transpose (
                   take (transpose (nz_hex), array ( [0, 1, 3, 2, 4, 5, 7, 6]),axis=0))
                nhex = ZLsc [i]
                istart = istart + ZLss [i] * ZLsc [i]
            else :
                raise NodeError, `ZLss[i]` + "is an incorrect number of nodes."

        m3 = mesh3 (x, y, z, funcs = [c], verts = [nz_tet, nz_pyr, nz_prism,
           nz_hex])
        [nv, xyzv, cv] = slice3 (m3, 1, None, None, 1, value = .9 * max (c) )
        pyz = plane3 ( array ([1, 0, 0], Float ), zeros (3, Float))
        pxz = plane3 ( array ([0, 1, 0], Float ), zeros (3, Float))

        # draw a colored plane first
        fma ()
        clear3 ()
        # Make sure we don't draw till ready
        set_draw3_ (0)
        [np, xyzp, vp] = slice3 (m3, pyz, None, None, 1)
        pl3tree (np, xyzp, vp, pyz, split = 0)
        palette ("rainbow.gp")
        orient3 ()
        limits (square=1)
        demo5_light (1)
        paws ()

        [nw, xyzw, cw] = slice3 (m3, 1, None, None, 1, value = .9 * min (c) )
        [nvi, xyzvi, cvi] = slice3 (m3, 1, None, None, 1, value = .1 * min (c) )
        [nvi, xyzvi, cvi] = \
            slice2 (- pyz, nvi, xyzvi, cvi)
        [nvj, xyzvj, cvj] = slice3 (m3, 1, None, None, 1, value = .1 * max (c) )
        [nvj, xyzvj, cvj] = \
            slice2 (- pyz, nvj, xyzvj, cvj)
        [nvii, xyzvii, cvii] = slice3 (m3, 1, None, None, 1,
           value = 1.e-12 * min (c) )
        [nvii, xyzvii, cvii] = \
            slice2 (- pyz, nvii, xyzvii, cvii)
        [nvjj, xyzvjj, cvjj] = slice3 (m3, 1, None, None, 1,
           value = 1.e-12 * max (c) )
        [nvjj, xyzvjj, cvjj] = \
            slice2 (- pyz, nvjj, xyzvjj, cvjj)

        fma ()
        print "gnomon -- turn on gnomon"
        gnomon (1)
        clear3 ()
        # Make sure we don't draw till ready
        set_draw3_ (0)
        pl3tree (nv, xyzv) # , cv)
        pl3tree (nw, xyzw) # , cw)
        pl3tree (nvi, xyzvi) # , cvi)
        pl3tree (nvj, xyzvj) # , cvj)
        pl3tree (nvii, xyzvii) # , cvii)
        pl3tree (nvjj, xyzvjj) # , cvjj)
        orient3 ()
        light3 (ambient = 0, diffuse = .5, specular = 1, sdir = [0, 0, -1])
        limits (square=1)
        palette ("gray.gp")
        demo5_light (1)
        paws ()
        palette ("heat.gp")
        paws ()


    if len (itest) == 0 or itest [0] == 7 :
        # test plwf on the sombrero function
        # compute sombrero function
        x = arange (-20, 21, dtype = Float)
        y = arange (-20, 21, dtype = Float)
        z = zeros ( (41, 41), Float)
        r = sqrt (add.outer ( x ** 2, y **2)) + 1e-6
        z = sin (r) / r
        fma ()
        clear3 ()
        gnomon (0)
        # Make sure we don't draw till ready
        set_draw3_ (0)
        palette ("rainbow.gp")
        limits (square=1)
        orient3 ()
        light3 ()
        plwf (z, fill = z, ecolor = "black")
        [xmin, xmax, ymin, ymax] = draw3 (1)
        limits (xmin, xmax, ymin, ymax)
        paws ()
        ##### Try smooth contours, log mode
        [nv, xyzv, dum] = slice3mesh (x, y, z)
        zmult = max (max (abs (x)), max (abs (y)))
        plzcont (nv, xyzv, contours = 20, scale = "normal")
        [xmin, xmax, ymin, ymax] = draw3 (1)
        limits (xmin, xmax, ymin, ymax)
        paws ()
        plzcont (nv, xyzv, contours = 20, scale = "lin", edges=1)
        [xmin, xmax, ymin, ymax] = draw3 (1)
        limits (xmin, xmax, ymin, ymax)
        paws ()
        plwf (z, fill = z, shade = 1, ecolor = "black")
        [xmin, xmax, ymin, ymax] = draw3 (1)
        limits (xmin, xmax, ymin, ymax)
        paws ()
        plwf (z, fill = z, shade = 1, edges = 0)
        [xmin, xmax, ymin, ymax] = draw3 (1)
        limits (xmin, xmax, ymin, ymax)
        paws ()
        light3(diffuse=.2,specular=1)
        print "light3,sdir=[cos(theta),.25,sin(theta)]  -- movie"
        making_movie = 1
        movie(demo5_light, lims = [xmin, xmax, ymin, ymax])
        making_movie = 0
        fma()
        demo5_light(1)
        paws ()
        plwf (z, fill = None, shade = 1, edges = 0)
        [xmin, xmax, ymin, ymax] = draw3 (1)
        palette("gray.gp")
        limits (xmin, xmax, ymin, ymax)
        paws ()


    if len (itest) == 0 or itest [0] == 8 :
        # test pl3surf on the sombrero function
        # compute sombrero function
        nc1 = 100
        nv1 = nc1 + 1
        br = - (nc1 / 2)
        tr = nc1 / 2 + 1
        x = arange (br, tr, dtype = Float) * 40. / nc1
        y = arange (br, tr, dtype = Float) * 40. / nc1
        z = zeros ( (nv1, nv1), Float)
        r = sqrt (add.outer ( x ** 2, y **2)) + 1e-6
        z = sin (r) / r
        # In order to use pl3surf, we need to construct a mesh
        # using mesh3. The way I am going to do that is to define
        # a function on the 3d mesh so that the sombrero function
        # is its 0-isosurface.
        z0 = min (ravel (z))
        z0 = z0 - .05 * abs (z0)
        maxz = max (ravel (z))
        maxz = maxz + .05 * abs (maxz)
        zmult = max (max (abs (x)), max (abs (y)))
        dz = (maxz - z0)
        nxnynz = array ( [nc1, nc1, 1], Int)
        dxdydz = array ( [1.0, 1.0, zmult*dz], Float )
        x0y0z0 = array ( [float (br), float (br), z0*zmult], Float )
        meshf = zeros ( (nv1, nv1, 2), Float )
        meshf [:, :, 0] = zmult*z - (x0y0z0 [2])
        meshf [:, :, 1] = zmult*z - (x0y0z0 [2] + dxdydz [2])

        m3 = mesh3 (nxnynz, dxdydz, x0y0z0, funcs = [meshf])
        fma ()
        # Make sure we don't draw till ready
        set_draw3_ (0)
        pldefault(edges=0)
        [nv, xyzv, col] = slice3 (m3, 1, None, None, value = 0.)
        orient3 ()
        pl3surf (nv, xyzv)
        lim = draw3 (1)
        limits (lim [0], lim [1], 1.5*lim [2], 1.5*lim [3])
        palette ("gray.gp")
        paws ()
        # Try new slicing function to get color graph
        [nv, xyzv, col] = slice3mesh (nxnynz [0:2], dxdydz [0:2], x0y0z0 [0:2],
           zmult * z, color = zmult * z)
        pl3surf (nv, xyzv, values = col)
        lim = draw3 (1)
        dif = 0.5 * (lim [3] - lim [2])
        limits (lim [0], lim [1], lim [2] - dif, lim [3] + dif)

        palette ("rainbow.gp")
        paws ()
        palette ("heat.gp")
        # Try plzcont--see if smooth mode possible
        plzcont (nv, xyzv)
        draw3 (1)
        paws ()
        plzcont (nv, xyzv, contours = 20)
        draw3 (1)
        paws ()
        plzcont (nv, xyzv, contours = 20, scale = "log")
        draw3(1)
        paws ()
        plzcont (nv, xyzv, contours = 20, scale = "normal")
        draw3(1)
        paws ()
    if len (itest) == 0 or itest [0] == 9 :
        vsf = 0.
        c = 1
        s = 1000.
        kmax = 25
        lmax = 35
        # The following computations define an interesting 3d surface.

        xr = multiply.outer (
           arange (1, kmax + 1, dtype = Float), ones (lmax, Float))
        yr = multiply.outer (
           ones (kmax, Float), arange (1, lmax + 1, dtype = Float))
        zt = 5. + xr + .2 * rand (kmax, lmax)   # ranf (xr)
        rt = 100. + yr + .2 * rand (kmax, lmax)   # ranf (yr)
        z = s * (rt + zt)
        z = z + .02 * z * rand (kmax, lmax)   # ranf (z)
        ut = rt/sqrt (rt ** 2 + zt ** 2)
        vt = zt/sqrt (rt ** 2 + zt ** 2)
        ireg =  multiply.outer ( ones (kmax, Float), ones (lmax, Float))
        ireg [0:1, 0:lmax]=0
        ireg [0:kmax, 0:1]=0
        ireg [1:15, 7:12]=2
        ireg [1:15, 12:lmax]=3
        ireg [3:7, 3:7]=0
        freg=ireg + .2 * (1. - rand (kmax, lmax))  # ranf (ireg))
        freg=array (freg, Float)
        #rt [4:6, 4:6] = -1.e8
        z [3:10, 3:12] = z [3:10, 3:12] * .9
        z [5, 5] = z [5, 5] * .9
        z [17:22, 15:18] = z [17:22, 15:18] * 1.2
        z [16, 16] = z [16, 16] * 1.1
        orient3 ()
        plwf (freg, shade = 1, edges = 0)
        [xmin, xmax, ymin, ymax] = draw3 (1)
        limits (xmin, xmax, ymin, ymax)
        paws ()
        nxny = array ( [kmax - 1, lmax - 1])
        x0y0 = array ( [0., 0.])
        dxdy = array ( [1., 1.])
        [nv, xyzv, col] = slice3mesh (nxny, dxdy, x0y0, freg)
        [nw, xyzw, col] = slice3mesh (nxny, dxdy, x0y0, freg + ut)
        pl3tree (nv, xyzv)
        pl3tree (nw, xyzw)
        draw3 (1)
        limits ( )
        paws ()

        light3 (ambient = 0, diffuse = .5, specular = 1, sdir = [0, 0, -1])
        demo5_light (1)
        paws ()

        [nv, xyzv, col] = slice3mesh (nxny, dxdy, x0y0, freg, color = freg)
        pl3surf (nv, xyzv, values = col)
        draw3 (1)
        palette ("rainbow.gp")
        paws ()
        [nv, xyzv, col] = slice3mesh (nxny, dxdy, x0y0, freg, color = z)
        pl3surf (nv, xyzv, values = col)
        draw3 (1)
        paws ()
        palette ("stern.gp")
        paws ()
        [nv, xyzv, col] = slice3mesh (nxny, dxdy, x0y0, z, color = z)
        pl3surf (nv, xyzv, values = col)
        orient3(phi=0,theta=0)
        draw3 (1)
        paws ()
        set_draw3_ (0)
        palette ("gray.gp")
        light3 ( diffuse=.1, specular=1., sdir=array([0,0,-1]))
        pl3surf (nv, xyzv)
        draw3 (1)
        paws ()

#     spin3 ()
#     paws ()

    hcp_finish ()
Example #37
0
def tirage ( m ):
    x = np.random.rand()
    y = np.random.rand()
    x = (x-0.5)*2*m
    y = (y-0.5)*2*m
    return x, y
Example #38
0
import numpy as np
import scipy.stats as sp
x=np.random.rand(1000)
y=np.random.rand(1000)

print(sp.pearsonr(x, y))
Example #39
0
    def synthesize(self, tau=50, mode=None):
        """Synthesize observations.
        
        Parameters
        ----------
        tau : int (default = 50)
            Synthesize tau frames. 
            
        mode : Combination of ['s','q','r']
            's' - Use the original states
            'q' - Do NOT add state noise
            'r' - Add observations noise

            In case 's' is specified, 'tau' is ignored and the number of 
            frames equals the number of state time points.
            
        Returns
        -------
        I : numpy array, shape = (D, tau)
            Matrix with N D-dimensional column vectors as observations.
            
        X : numpy array, shape = (N, tau) 
            Matrix with N tau-dimensional state vectors.        
        """
        
        if not self._ready:
            raise ErrorDS("LDS not ready for synthesis!")
        
        Bhat = None
        Xhat = self._Xhat
        Qhat = self._Qhat
        Ahat = self._Ahat
        Chat = self._Chat
        Rhat = self._Rhat
        Yavg = self._Yavg
        initM0 = self._initM0
        initS0 = self._initS0
        nStates = self._nStates
        
        if mode is None:
            raise ErrorDS("No synthesis mode specified!")
        
        # use original states -> tau is restricted
        if mode.find('s') >= 0:
            tau = Xhat.shape[1]
        
        # data to be filled and returned     
        I = np.zeros((len(Yavg), tau))
        X = np.zeros((nStates, tau))
        
        if mode.find('r') >= 0:
            stdR = np.sqrt(Rhat)
        
        # add state noise, unless user explicitly decides against
        if not mode.find('q') >= 0:
            stdS = np.sqrt(initS0)
            (U, S, V) = np.linalg.svd(Qhat, full_matrices=False)
            Bhat = U*np.diag(np.sqrt(S)) 
    
        t = 0 
        Xt = np.zeros((nStates, 1))
        while (tau<0) or (t<tau):  
            # uses the original states
            if mode.find('s') >= 0:
                Xt1 = Xhat[:,t]
            # first state
            elif t == 0:
                Xt1 = initM0
                if mode.find('q') < 0:
                    # zero-mean Gaussian noise on the initial state
                    Xt1 += stdS*np.random.randn(nStates)
            # any further states (if mode != 's')
            else:
                Xt1 = Ahat*Xt
                if not mode.find('q') >= 0:
                    # Gaussian state noise shaped by Bhat (from the SVD of Qhat)
                    Xt1 = Xt1 + Bhat*np.random.randn(nStates)
            
            # synthesizes image
            It = Chat*Xt1 + np.reshape(Yavg,(len(Yavg),1))
         
            # adds observation noise
            if mode.find('r') >= 0:
                It += stdR*np.random.randn(len(Yavg))

            # save ...
            Xt = Xt1
            I[:,t] = It.reshape(-1)
            X[:,t] = Xt.reshape(-1)
            t += 1
            
        return (I, X)
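A minimal usage sketch, assuming `lds` is an already-fitted instance of this class (so that `_ready` is True) and that the mode flags behave as the docstring describes:

    I, X = lds.synthesize(tau=100, mode='r')   # 100 fresh frames: state noise on, observation noise on
    I2, X2 = lds.synthesize(mode='sq')         # replay the original states without adding any noise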
Example #40
0
import numpy as np

def rand_init_weights(L_in, L_out, epsilon):
    """Randomly initialize the weights of a layer with L_in incoming and L_out
    outgoing connections (plus a bias column), drawing each weight uniformly
    from [-epsilon, epsilon] to break symmetry.
    """
    return np.random.rand(L_out, 1 + L_in) * 2 * epsilon - epsilon
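A hypothetical call, e.g. for a layer with 400 inputs feeding 25 units:

    W1 = rand_init_weights(400, 25, 0.12)   # shape (25, 401), values in [-0.12, 0.12]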
Example #41
0
 def step(self):
     for _ in xrange(self.rows*self.cols*3):
         data = np.random.rand() * 255
         data
Example #42
0
    def fit(self, p, x, y):
        # Least-squares fit of the residual function, starting from the initial guess p.
        plsq = leastsq(self.residuals, p, args=(y, x))
        return plsq


if __name__ == "__main__":
    from scipy.optimize import leastsq
    try:
        data = pylab.csv2rec('data.csv')
    except Exception:
        print("Error! You need to give some data to fit.")
        print("I will generate some for you")
        # fall back to random data
        data = np.random.rand(100)
    

        
    pfh = FitHandler()
    pfh.plot_data(data)
    
    orders = [17]
    poly_list = []
    
    # Fitting
    for order in orders:
       pfit = pfh.fit_and_plot(data, order)
       poly_list.append(pfit)
    plt.title("Fitting the data")
    plt.legend()   
Example #43
0
         linestyle='--',
         color='b',
         marker='o',
         label='Autoencoder(7,4)')
'''
BPSK ERROR RATE
'''
N = 5000000
EbNodB_range = range(0, 11)
itr = len(EbNodB_range)
ber = [None] * itr

for n in range(0, itr):
    EbNodB = EbNodB_range[n]
    EbNo = 10.0**(EbNodB / 10.0)
    # random +/-1 BPSK symbols plus AWGN with the noise power implied by Eb/No
    x = 2 * (np.random.rand(N) >= 0.5) - 1
    noise_std = 1 / np.sqrt(2 * EbNo)
    y = x + noise_std * np.random.randn(N)
    y_d = 2 * (y >= 0) - 1
    errors = (x != y_d).sum()
    ber[n] = 1.0 * errors / N

    print("EbNodB:", EbNodB)
    print("Error bits:", errors)
    print("Error probability:", ber[n])

plt.plot(EbNodB_range, ber, 'bo', EbNodB_range, ber, 'k')
plt.title('BPSK Modulation')

# plt.plot(EbNodB_range, ber, linestyle='', marker='o', color='r')
# plt.plot(EbNodB_range, ber, linestyle='-', color = 'b')
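As a hedged cross-check, the closed-form BPSK bit error rate in AWGN is Q(sqrt(2 Eb/N0)) = 0.5*erfc(sqrt(Eb/N0)); a minimal sketch, assuming scipy is available alongside the numpy/matplotlib aliases used above:

from scipy.special import erfc

ber_theory = [0.5 * erfc(np.sqrt(10.0**(dB / 10.0))) for dB in EbNodB_range]
plt.plot(EbNodB_range, ber_theory, 'r--', label='BPSK theory')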
Example #44
0
        datapackets.append(data)

    pktcnt = len(datapackets)

        
    # write them in the event cycles:

    pos = 0

    fida = open('dataa.txt', 'w')
    fidb = open('datab.txt', 'w')
    
    while pos < pktcnt:
        # flip two coins, for tx on A and B;
        asend = n.rand() > 0.2
        if asend :
            writePacket(fida, datapackets[pos])
            pos += 1
        else:
            writeEmpty(fida)

        bsend = n.rand() > 0.2

        if pos != pktcnt:
            if bsend :
                writePacket(fidb, datapackets[pos])
                pos += 1
            else:
                writeEmpty(fidb)
Example #45
0
# -*- coding: utf-8 -*-

import os
from numpy.random import rand
from matplotlib.pyplot import *
from random import random

rands = []
for i in range(100):
    rands.append(random())
plot(rands)

from random import gauss

grands = []
for i in range(100):
    grands.append(gauss(0, 1))
plot(grands)

plot(rand(100))

os.system("pause")
Example #46
0
'''Main simulation loop '''

import math

import numpy as np
import Bead
import physics

#Run parameters:
dim = 2
N_atoms = 100

# Random initial positions; zero initial velocities/accelerations assumed (v0, a0 are not defined in the snippet).
x0 = np.random.rand(N_atoms)
y0 = np.random.rand(N_atoms)
v0 = np.zeros(N_atoms)
a0 = np.zeros(N_atoms)

atoms = []
for i in range(N_atoms):
    atoms.append((x0[i], v0[i], a0[i], 1))


for i in range(atom_len_1): # replace atom_len_1 with the number of atoms
  force_vec = [0, 0]
  atom1 = 0 # get atom1 from Bead. It should be a number between 1 and 5

  for j in range(atom_len_2): # replace atom_len_2 with the number of atoms atom1 interacts with
    atom2 = 0 # get interacting atom2 from Bead. It should be a number between 1 and 5

    if ((atom1, atom2) in lipids) or ((atom2, atom1) in lipids):
      force_exp = spring_force(atom1, atom2, inter[atom1-1][atom2-1])
      # decompose the pairwise force along the direction from atom1 to atom2
      angl = math.atan((atom2.x - atom1.x)[1] / (atom2.x - atom1.x)[0])
      force_vec[0] += force_exp * math.cos(angl)
      force_vec[1] += force_exp * math.sin(angl)
    else:
      force_exp = force(atom1, atom2, inter[atom1-1][atom2-1])
Example #47
0
print "appears in the Narcisse GUI."

paws ()

vsf = 0.
c = 1
s = 1000.
kmax = 25
lmax = 35
# The following computations define an interesting 3d surface.

xr = multiply.outer (
   arange (1, kmax + 1, dtype = Float), ones (lmax, Float))
yr = multiply.outer (
   ones (kmax, Float), arange (1, lmax + 1, dtype = Float))
zt = 5. + xr + .2 * rand (kmax, lmax)   # ranf (xr)
rt = 100. + yr + .2 * rand (kmax, lmax)   # ranf (yr)
z = s * (rt + zt)
z = z + .02 * z * rand (kmax, lmax)   # ranf (z)
ut = rt/sqrt (rt ** 2 + zt ** 2)
vt = zt/sqrt (rt ** 2 + zt ** 2)
ireg =  multiply.outer ( ones (kmax), ones (lmax))
ireg [0:1, 0:lmax]=0
ireg [0:kmax, 0:1]=0
ireg [1:15, 7:12]=2
ireg [1:15, 12:lmax]=3
ireg [3:7, 3:7]=0
freg=ireg + .2 * (1. - rand (kmax, lmax))  # ranf (ireg))
freg=array (freg, Float)
#rt [4:6, 4:6] = -1.e8
z [3:10, 3:12] = z [3:10, 3:12] * .9
Example #48
0
 def __init__(self, src, typ, id, data = None, N = 0):
     if data is None:
         # no payload supplied: generate N random 16-bit words
         self.data = (n.rand(N) * 2**16).astype(n.uint16)
     else:
         self.data = data
     self.typ = typ
     self.src = src
     self.id = id
Example #49
0
import numpy as np
import buffermodule # buffer module responsible for all interactions with the buffer

N=300
I=50
delay_ancien=0
steps=100
lr=0.001
batch_size = 10 
decay=0.99

neurnet={}
neurnet["w1"]=np.rand(300,50)/np.sqrt(50)
neurnet["w2"]=np.rand(300)/np.sqrt(300)

wei_buff={ k:np.zeros_like(v) for k,v in neurnet.items() }
rmsprop_cache = { k : np.zeros_like(v) for k,v in neurnet.items() } 

def sigmoid(x):
	return 1/(1+np.exp(-x))

def forwardp(i):
	h=np.dot(neurnet["w1"],i)   # hidden layer
	h[h<0]=0                    # ReLU
	o=np.dot(neurnet["w2"],h)   # output layer
	o=sigmoid(o)
	return h,o

def prepo(dl):
	I[1:]=I[:-1]
	I[0]=dl
Example #50
0
    for i in range(4):
        for i in range(32):
            writePacket(fida, datapackets[pos])
            pos += 1
            writePacket(fidb, datapackets[pos])
            pos += 1

        for i in range(50 - 32):
            writeEmpty(fida)
            writeEmpty(fidb)

        
    while pos < pktcnt:

        # flip two coins, for tx on A and B;
        asend = n.rand() > 0.3
        if asend :
            writePacket(fida, datapackets[pos])
            pos += 1
        else:
            writeEmpty(fida)

        bsend = n.rand() > 0.3

        if pos != pktcnt:
            if bsend :
                writePacket(fidb, datapackets[pos])
                pos += 1
            else:
                writeEmpty(fidb)
Example #51
0
    
    host = "192.168.0.1"
    port = 4400
    
    buf = 1024
    addr = (host,port)
    UDPSock = socket(AF_INET,SOCK_DGRAM)

    UDPSock.bind(("192.168.0.2", 40000))
    
      
    UDPSock.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)   
    
    UDPSock.sendto(data,addr)


fid = open('dataprops.txt', 'w')

for i in range(1000):

    id = int(round(n.rand() * (2**32 - 1)))
    src = int(round(n.rand() * 63))
    typ = int(round(n.rand() * 3))

    data = struct.pack(">BBI",  typ, src,  id)
    
    fid.write("%2.2X %2.2X %8.8X \n" % (typ, src, id))

    sendDataPacket(data)

Example #52
0
 def test(self, shape=(10, 1920, 1080, 3)):
     return self.solve(np.random.random(shape))
Example #53
0
def GEMF_SIM(Para, Net, x0, StopCond, N, Directed=False):
    """
    An event-driven (Gillespie-style) simulation of the stochastic process:
    each step draws an exponential waiting time from the total transition rate
    and then an event in proportion to the per-node rates.
    
    """
    M = Para[0]
    q = Para[1]
    L = Para[2]
    A_d = Para[3]
    A_b = Para[4]
    Neigh = Net[0]
    I1 = Net[1]
    I2 = Net[2]
    NeighW = Net[4]

    n_index = []
    j_index = []
    i_index = []
    #------------------------------
    bil = np.zeros((M, L))
    for l in range(L):
        bil[:, l] = A_b[l].sum(axis=1)  #l'th column is row sum of l'th A_b
    #------------------------------
    bi = np.zeros((M, M, L))
    for i in range(M):
        for l in range(L):
            bi[i, :, l] = A_b[l][i, :]
    #------------------------------
    di = A_d.sum(
        axis=1
    )  #The rate that we leave compartment i, due to nodal transitions
    #------------------------------
    #X = copy(x0)
    X = x0.astype(
        np.int32
    )  #since compartments are just numbers we are using integer types

    #------------------------------
    Nq = np.zeros((L, N))
    #------------------------------ver 2
    for n in range(N):
        for l in range(L):
            Nln = Neigh[l][I1[l][n]:I2[l][n] + 1]
            Nq[l][n] = sum((X[Nln] == q[l]) * NeighW[l][I1[l][n]:I2[l][n] + 1])
    #------------------------------ver2
    Rn = np.zeros(N)

    for n in range(N):
        #         print 'di[X[n]]: '+str(di[X[n]])
        #         print 'Nq[:,n]: '+str(Nq[:,n])
        #         print 'bil[X[n],:]: '+str(bil[X[n],:])
        #         print 'np.dot(bil[X[n],:],Nq[:,n]): '+str(np.dot(bil[X[n],:],Nq[:,n]))
        Rn[n] = di[X[n]] + np.dot(bil[X[n], :], Nq[:, n])
    R = sum(Rn)
    #------------------------------
    EventNum = StopCond[1]
    RunTime = StopCond[1]
    ts = []
    #     #------------------------------
    s = -1
    Tf = 0
    if len(Net) > 5:
        NNeigh = Net[5]
        NI1 = Net[6]
        NI2 = Net[7]
        NNeighW = Net[8]
        while Tf < RunTime:
            s += 1
            ts.append(-np.log(np.random.rand()) / R)
            #------------------------------ver 2
            ns = rnd_draw(Rn)
            iss = X[ns]

            js = rnd_draw(np.ravel(A_d[iss, :].T + np.dot(bi[iss], Nq[:, ns])))
            n_index.extend(ns)
            j_index.extend(js)
            i_index.extend(iss)
            #        -------------------- % Updateing ver2
            X[ns] = js
            R -= Rn[ns]
            Rn[ns] = di[js] + np.dot(bil[js, :], Nq[:, ns])
            R += Rn[ns]

            infl = (q == js
                    ).nonzero()[0]  #inf is layers with influencer compartment
            for l in infl:
                Nln = NNeigh[l][
                    NI1[l][ns]:NI2[l][ns] +
                    1]  #finding nodes that are adjacent to new infected
                IncreasEff = NNeighW[l][NI1[l][ns]:NI2[l][ns] + 1]
                Nq[l][Nln] += IncreasEff  #add the new infection weight edges
                k = 0
                for n in Nln:
                    Rn[n] += bil[X[n], l] * IncreasEff[k]
                    R += bil[X[n], l] * IncreasEff[k]
                    k += 1

            infl2 = (q == iss).nonzero()[
                0]  #infl2 is layers with influencer compartment

            #         print 'inf2: '+str(inf2)
            for l in infl2:  #finding influencer compartments
                Nln = NNeigh[int(
                    l)][int(NI1[l][ns]):int(NI2[l][ns]) +
                        1]  #finding nodes that are adjacent to new infected
                reducEff = NNeighW[int(l)][int(NI1[l][ns]):int(NI2[l][ns]) + 1]
                Nq[l][
                    Nln] -= reducEff  #subtract the new infection weight edges
                k = 0
                for n in Nln:
                    Rn[n] -= bil[X[n], l] * reducEff[k]
                    R -= bil[X[n], l] * reducEff[k]
                    k += 1
            if R < 1e-6:
                break
            Tf += ts[s]
    else:
        while Tf < RunTime:
            s += 1
            ts.append(-np.log(np.random.rand()) / R)
            #------------------------------ver 2
            ns = rnd_draw(Rn)
            iss = X[ns]

            js = rnd_draw(np.ravel(A_d[iss, :].T + np.dot(bi[iss], Nq[:, ns])))
            n_index.extend(ns)
            j_index.extend(js)
            i_index.extend(iss)
            #        -------------------- % Updateing ver2
            X[ns] = js
            R -= Rn[ns]
            Rn[ns] = di[js] + np.dot(bil[js, :], Nq[:, ns])
            R += Rn[ns]

            infl = (q == js
                    ).nonzero()[0]  #inf is layers with influencer compartment
            for l in infl:
                Nln = Neigh[int(
                    l)][int(I1[l][ns]):int(I2[l][ns]) +
                        1]  #finding nodes that are adjacent to new infected
                IncreasEff = NeighW[int(l)][int(I1[l][ns]):int(I2[l][ns]) + 1]
                Nq[l][Nln] += IncreasEff  #add the new infection weight edges
                k = 0
                for n in Nln:
                    Rn[n] += bil[X[n], l] * IncreasEff[k]
                    R += bil[X[n], l] * IncreasEff[k]
                    k += 1

            infl2 = (q == iss).nonzero()[
                0]  #infl2 is layers with influencer compartment
            #         print 'inf2: '+str(inf2)
            for l in infl2:  #finding influencer compartments
                Nln = Neigh[int(
                    l)][int(I1[l][ns]):int(I2[l][ns]) +
                        1]  #finding nodes that are adjacent to new infected
                reducEff = NeighW[int(l)][int(I1[l][ns]):int(I2[l][ns]) + 1]
                Nq[l][
                    Nln] -= reducEff  #subtract the new infection weight edges
                k = 0
                for n in Nln:
                    Rn[n] -= bil[X[n], l] * reducEff[k]
                    R -= bil[X[n], l] * reducEff[k]
                    k += 1
            if R < 1e-6:
                break
            Tf += ts[s]

    return ts, n_index, i_index, j_index
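The loop above implements a standard event-driven (Gillespie) scheme: an exponential waiting time drawn from the total rate R, followed by a node picked in proportion to its rate Rn[n]. The helper rnd_draw is not shown in this snippet; a minimal sketch of an equivalent draw, assuming only numpy, is:

import numpy as np

def rnd_draw_sketch(rates, rng=np.random):
    # Draw one index with probability proportional to its (non-negative) rate.
    p = np.asarray(rates, dtype=float)
    return rng.choice(len(p), p=p / p.sum())

def next_event(Rn, rng=np.random):
    R = Rn.sum()                      # total transition rate
    dt = -np.log(rng.rand()) / R      # inter-event time ~ Exponential(R)
    n = rnd_draw_sketch(Rn, rng)      # node at which the event occurs
    return dt, n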
Example #54
0
 def __init__(self, inputs, outputs):
     self.hidden = round(inputs * 0.6666, 0)
     self.inputs = inputs
     self.outputs = outputs
     self.input_hidden_weights = numpy.random.random((inputs.size, 1))
     self.hidden_output_weights = numpy.random.random((self.hidden, 1))
Example #55
0
import numpy as np

def rand_initialize_weights(l_in, l_out, epsilon_init=0.12):
    return np.random.rand(l_in, l_out) * 2 * epsilon_init - epsilon_init
Example #56
0
def min_err(train_data, d=D, reg=REG):
    # random initial low-rank factors u (100 x d) and v (d x 24983)
    u = np.random.random((100, d))
    v = np.random.random((d, 24983))
Example #57
0
def plot_subfaults(subfaults, plot_centerline=False, slip_color=False, \
            cmap_slip=None, cmin_slip=None, cmax_slip=None, \
            plot_rake=False, xylim=None, plot_box=True):

    """
    Plot each subfault projected onto the surface.
    Describe parameters...
    """

    import matplotlib
    import matplotlib.pyplot as plt

    #figure(44,(6,12)) # For CSZe01
    #clf()

    # for testing purposes, make random slips:
    test_random = False


    max_slip = 0.
    min_slip = 0.
    for subfault in subfaults:
        if test_random:
            subfault['slip'] = 10.*numpy.random.rand()  # for testing
            #subfault['slip'] = 8.  # uniform
        slip = subfault['slip']
        max_slip = max(abs(slip), max_slip)
        min_slip = min(abs(slip), min_slip)
    print("Max slip, Min slip: ",max_slip, min_slip)

    if slip_color:
        if cmap_slip is None:
            cmap_slip = matplotlib.cm.jet
            #white_purple = colormaps.make_colormap({0.:'w', 1.:[.6,0.2,.6]})
            #cmap_slip = white_purple
        if cmax_slip is None:
            cmax_slip = max_slip
        if cmin_slip is None:
            cmin_slip = 0.
        if test_random:
            print("*** test_random == True so slip and rake have been randomized")
        
    y_ave = 0.
    for subfault in subfaults:

        set_fault_xy(subfault)

        # unpack parameters:
        paramlist = """x_top y_top x_bottom y_bottom x_centroid y_centroid
            depth_top depth_bottom depth_centroid x_corners y_corners""".split()

        for param in paramlist:
            # unpack each geometry field of the subfault dict into a local
            # variable of the same name (relies on exec assigning locals)
            cmd = "%s = subfault['%s']" % (param,param)
            exec(cmd)

        y_ave += y_centroid


        # Plot projection of planes to x-y surface:
        if plot_centerline:
            plt.plot([x_top],[y_top],'bo',label="Top center")
            plt.plot([x_centroid],[y_centroid],'ro',label="Centroid")
            plt.plot([x_top,x_centroid],[y_top,y_centroid],'r-')
        if plot_rake:
            if test_random:
                subfault['rake'] = 90. + 30.*(numpy.random.rand()-0.5)  # for testing
            tau = (subfault['rake'] - 90) * numpy.pi/180.
            plt.plot([x_centroid],[y_centroid],'go',markersize=5,label="Centroid")
            dxr = x_top - x_centroid
            dyr = y_top - y_centroid
            x_rake = x_centroid + numpy.cos(tau)*dxr - numpy.sin(tau)*dyr
            y_rake = y_centroid + numpy.sin(tau)*dxr + numpy.cos(tau)*dyr
            plt.plot([x_rake,x_centroid],[y_rake,y_centroid],'g-',linewidth=1)
        if slip_color:
            slip = subfault['slip']
            #c = cmap_slip(0.5*(cmax_slip + slip)/cmax_slip)
            #c = cmap_slip(slip/cmax_slip)
            s = min(1, max(0, (slip-cmin_slip)/(cmax_slip-cmin_slip)))
            c = cmap_slip(s*.99)  # since 1 does not map properly with jet
            plt.fill(x_corners,y_corners,color=c,edgecolor='none')
        if plot_box:
            plt.plot(x_corners, y_corners, 'k-')

    slipax = plt.gca()
        
    y_ave = y_ave / len(subfaults)
    slipax.set_aspect(1./numpy.cos(y_ave*numpy.pi/180.))
    plt.ticklabel_format(style='plain',useOffset=False)
    plt.xticks(rotation=80)
    if xylim is not None:
        plt.axis(xylim)
    plt.title('Fault planes')
    if slip_color:
        cax,kw = matplotlib.colorbar.make_axes(slipax)
        norm = matplotlib.colors.Normalize(vmin=cmin_slip,vmax=cmax_slip)
        cb1 = matplotlib.colorbar.ColorbarBase(cax, cmap=cmap_slip, norm=norm)
        #import pdb; pdb.set_trace()
    plt.sca(slipax) # reset the current axis to the main figure
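A hypothetical call, assuming `subfaults` is a list of dicts that carry at least 'slip' and 'rake' together with the geometry fields that set_fault_xy fills in:

import matplotlib.pyplot as plt

plot_subfaults(subfaults, slip_color=True, plot_rake=True, cmax_slip=10.)
plt.show()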
Example #58
0
pool = zeros(10)
samp = equalprobi(5,10,pool)
print 'equalprobi:', samp

pool = range(10,20)
samp = equalprob(5,pool)
print 'equalprob:', samp
print pool

pool = array(pool)
samp = equalprob(5,pool)
print 'equalprob:', samp
print pool
print

wts = rand(6)
ind, sorted, cum = prepwts(wts)
print wts
print ind
print wts.take(ind)
print cum
print

wts = linspace(.1, 1., 1000)
ind, sorted, cumwt = prepwts(wts)
nsamp = 10
cumrat = wtratios(nsamp, sorted, cumwt[-1])
ntry, samp = SampfordPPS(nsamp, cumwt, cumrat)

state0 = random.get_state()
id, key, pos = state0[:3]   # ('MT19937', key array, position); any remaining entries hold the Gaussian cache